//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
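///
/// For example, for the type { i32, { i32, i32 }, i32 } and the index list
/// {1, 1}, the linearized index is 2: the depth-first walk visits the leading
/// i32 (index 0) and then the two scalars of the inner struct (indices 1 and
/// 2), the second of which is the requested member.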
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the offset of the requested
      // element and recurse into that element with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
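/// For example, with a typical 64-bit DataLayout, the type
/// { i32, [2 x float] } decomposes into the EVTs { i32, f32, f32 } with
/// offsets { 0, 4, 8 }.
///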
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  assert((Ty->isScalableTy() == StartingOffset.isScalable() ||
          StartingOffset.isZero()) &&
         "Offset/TypeSize mismatch!");
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      TypeSize EltOffset =
          SL ? SL->getElementOffset(EI - EB) : TypeSize::getZero();
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::getFixed(StartingOffset);
  if (FixedOffsets) {
    SmallVector<TypeSize, 4> Offsets;
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getFixedValue());
  } else {
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);
  }
}

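/// computeValueLLTs - Given an LLVM IR type, compute a sequence of LLTs that
/// represent all the individual underlying non-aggregate types that comprise
/// it, analogous to ComputeValueVTs above. If Offsets is non-null, the offset
/// of each component is recorded in bits.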
void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

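/// Return true if a bitcast from T1 to T2 would be free for the purposes of
/// tail-call lowering: the types are identical, both are pointers, or both are
/// legal vector types.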
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
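///
/// For example, given
///   %a = call i64 @get()
///   %b = trunc i64 %a to i32
/// looking through %b yields %a (when the target allows truncation for tail
/// calls) and records 32 in DataBits.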
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in,
        // no change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of
      // the previous aggregate. Combine the two paths to obtain the true
      // address of our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search is blocked immediately and the loop is
  // a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of
  // the coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}


/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
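///
/// For example, the call below is in tail call position (assuming the return
/// attributes of @callee and the caller are compatible), since only the return
/// separates it from the end of its block:
///
///   %res = tail call i32 @callee(i32 %x)
///   ret i32 %res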
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // The following attributes are completely benign as far as calling
  // convention goes, so they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have a return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as a library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

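/// Starting from MBB, assign every reachable block to the given EHScope,
/// stopping at blocks that enter a different scope or that return from the
/// current one.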
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

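/// Compute, for each basic block in MF, the EH scope it belongs to, keyed by
/// the number of the scope's entry block (or of the function entry block for
/// blocks in the parent function). The result is empty if the function has no
/// EH scopes.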
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}