1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/Twine.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/IR/CFG.h"
27#include "llvm/IR/Constant.h"
28#include "llvm/IR/DerivedTypes.h"
29#include "llvm/IR/InstrTypes.h"
30#include "llvm/IR/Instruction.h"
31#include "llvm/IR/OperandTraits.h"
32#include "llvm/IR/Use.h"
33#include "llvm/IR/User.h"
34#include "llvm/Support/AtomicOrdering.h"
35#include "llvm/Support/ErrorHandling.h"
36#include <cassert>
37#include <cstddef>
38#include <cstdint>
39#include <iterator>
40#include <optional>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52class UnreachableInst;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
/// An instruction to allocate memory on the stack.
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62 using AlignmentField = AlignmentBitfieldElementT<0>;
63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
66 SwiftErrorField>(),
67 "Bitfields must be contiguous");
68
69protected:
70 // Note: Instruction needs to be a friend here to call cloneImpl.
71 friend class Instruction;
72
73 AllocaInst *cloneImpl() const;
74
75public:
76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
77 const Twine &Name, BasicBlock::iterator InsertBefore);
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 BasicBlock::iterator InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
86 Instruction *InsertBefore);
87 AllocaInst(Type *Ty, unsigned AddrSpace,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock::iterator);
92 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
93 const Twine &Name = "", Instruction *InsertBefore = nullptr);
94 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
95 const Twine &Name, BasicBlock *InsertAtEnd);
96
97 /// Return true if there is an allocation size parameter to the allocation
98 /// instruction that is not 1.
99 bool isArrayAllocation() const;
100
101 /// Get the number of elements allocated. For a simple allocation of a single
102 /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }
105
106 /// Overload to return most specific pointer type.
107 PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
109 }
110
111 /// Return the address space for the allocation.
112 unsigned getAddressSpace() const {
113 return getType()->getAddressSpace();
114 }
115
116 /// Get allocation size in bytes. Returns std::nullopt if size can't be
117 /// determined, e.g. in case of a VLA.
118 std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;
119
120 /// Get allocation size in bits. Returns std::nullopt if size can't be
121 /// determined, e.g. in case of a VLA.
122 std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
123
124 /// Return the type that is being allocated by the instruction.
125 Type *getAllocatedType() const { return AllocatedType; }
  /// For use only in special circumstances that need to generically
  /// transform a whole instruction (e.g. IR linking and vectorization).
128 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
129
130 /// Return the alignment of the memory that is being allocated by the
131 /// instruction.
132 Align getAlign() const {
133 return Align(1ULL << getSubclassData<AlignmentField>());
134 }
135
136 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
138 }
139
140 /// Return true if this alloca is in the entry block of the function and is a
141 /// constant size. If so, the code generator will fold it into the
142 /// prolog/epilog code, so it is basically free.
143 bool isStaticAlloca() const;
144
145 /// Return true if this alloca is used as an inalloca argument to a call. Such
146 /// allocas are never considered static even if they are in the entry block.
147 bool isUsedWithInAlloca() const {
148 return getSubclassData<UsedWithInAllocaField>();
149 }
150
151 /// Specify whether this alloca is used to represent the arguments to a call.
152 void setUsedWithInAlloca(bool V) {
153 setSubclassData<UsedWithInAllocaField>(V);
154 }
155
156 /// Return true if this alloca is used as a swifterror argument to a call.
157 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
158 /// Specify whether this alloca is used to represent a swifterror.
159 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
160
161 // Methods for support type inquiry through isa, cast, and dyn_cast:
162 static bool classof(const Instruction *I) {
163 return (I->getOpcode() == Instruction::Alloca);
164 }
165 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
167 }
168
169private:
170 // Shadow Instruction::setInstructionSubclassData with a private forwarding
171 // method so that subclasses cannot accidentally use it.
172 template <typename Bitfield>
173 void setSubclassData(typename Bitfield::Type Value) {
174 Instruction::setSubclassData<Bitfield>(Value);
175 }
176};
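
// Illustrative usage sketch (editorial, not part of this header's API):
// creating a constant-sized alloca and querying it, using only the
// constructors and accessors declared above. 'Ctx', 'DL', and 'BB' stand for
// an existing LLVMContext, DataLayout, and BasicBlock and are assumptions of
// the example.
//
//   Type *I32 = Type::getInt32Ty(Ctx);
//   AllocaInst *Slot =
//       new AllocaInst(I32, DL.getAllocaAddrSpace(), "slot", BB);
//   Slot->setAlignment(Align(4));
//   std::optional<TypeSize> Size = Slot->getAllocationSize(DL); // 4 bytes
//   bool Static = Slot->isStaticAlloca(); // true if BB is the entry block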
177
178//===----------------------------------------------------------------------===//
179// LoadInst Class
180//===----------------------------------------------------------------------===//
181
182/// An instruction for reading from memory. This uses the SubclassData field in
183/// Value to store whether or not the load is volatile.
184class LoadInst : public UnaryInstruction {
185 using VolatileField = BoolBitfieldElementT<0>;
186 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
187 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
188 static_assert(
189 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
190 "Bitfields must be contiguous");
191
192 void AssertOK();
193
194protected:
195 // Note: Instruction needs to be a friend here to call cloneImpl.
196 friend class Instruction;
197
198 LoadInst *cloneImpl() const;
199
200public:
201 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
202 BasicBlock::iterator InsertBefore);
203 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
204 Instruction *InsertBefore);
205 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 BasicBlock::iterator InsertBefore);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Instruction *InsertBefore);
210 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
211 BasicBlock *InsertAtEnd);
212 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
213 Align Align, BasicBlock::iterator InsertBefore);
214 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
215 Align Align, Instruction *InsertBefore = nullptr);
216 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
217 Align Align, BasicBlock *InsertAtEnd);
218 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
219 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
220 BasicBlock::iterator InsertBefore);
221 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
222 Align Align, AtomicOrdering Order,
223 SyncScope::ID SSID = SyncScope::System,
224 Instruction *InsertBefore = nullptr);
225 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
226 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
227 BasicBlock *InsertAtEnd);
228
229 /// Return true if this is a load from a volatile memory location.
230 bool isVolatile() const { return getSubclassData<VolatileField>(); }
231
232 /// Specify whether this is a volatile load or not.
233 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
234
235 /// Return the alignment of the access that is being performed.
236 Align getAlign() const {
237 return Align(1ULL << (getSubclassData<AlignmentField>()));
238 }
239
240 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
242 }
243
244 /// Returns the ordering constraint of this load instruction.
245 AtomicOrdering getOrdering() const {
246 return getSubclassData<OrderingField>();
247 }
248 /// Sets the ordering constraint of this load instruction. May not be Release
249 /// or AcquireRelease.
250 void setOrdering(AtomicOrdering Ordering) {
251 setSubclassData<OrderingField>(Ordering);
252 }
253
254 /// Returns the synchronization scope ID of this load instruction.
255 SyncScope::ID getSyncScopeID() const {
256 return SSID;
257 }
258
259 /// Sets the synchronization scope ID of this load instruction.
260 void setSyncScopeID(SyncScope::ID SSID) {
261 this->SSID = SSID;
262 }
263
264 /// Sets the ordering constraint and the synchronization scope ID of this load
265 /// instruction.
266 void setAtomic(AtomicOrdering Ordering,
267 SyncScope::ID SSID = SyncScope::System) {
268 setOrdering(Ordering);
269 setSyncScopeID(SSID);
270 }
271
272 bool isSimple() const { return !isAtomic() && !isVolatile(); }
273
274 bool isUnordered() const {
275 return (getOrdering() == AtomicOrdering::NotAtomic ||
276 getOrdering() == AtomicOrdering::Unordered) &&
277 !isVolatile();
278 }
279
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
282 static unsigned getPointerOperandIndex() { return 0U; }
283 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
284
285 /// Returns the address space of the pointer operand.
286 unsigned getPointerAddressSpace() const {
287 return getPointerOperandType()->getPointerAddressSpace();
288 }
289
290 // Methods for support type inquiry through isa, cast, and dyn_cast:
291 static bool classof(const Instruction *I) {
292 return I->getOpcode() == Instruction::Load;
293 }
294 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
296 }
297
298private:
299 // Shadow Instruction::setInstructionSubclassData with a private forwarding
300 // method so that subclasses cannot accidentally use it.
301 template <typename Bitfield>
302 void setSubclassData(typename Bitfield::Type Value) {
303 Instruction::setSubclassData<Bitfield>(Value);
304 }
305
306 /// The synchronization scope ID of this load instruction. Not quite enough
307 /// room in SubClassData for everything, so synchronization scope ID gets its
308 /// own field.
309 SyncScope::ID SSID;
310};
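
// Illustrative sketch (editorial): creating an aligned, acquire-ordered load
// with one of the constructors declared above. 'I32', 'Ptr', and 'BB' are
// assumed to be an existing integer type, pointer-typed Value, and insertion
// block in the caller's code.
//
//   LoadInst *LI =
//       new LoadInst(I32, Ptr, "val", /*isVolatile=*/false, Align(4),
//                    AtomicOrdering::Acquire, SyncScope::System, BB);
//   assert(!LI->isSimple() && !LI->isUnordered()); // acquire is atomic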
311
312//===----------------------------------------------------------------------===//
313// StoreInst Class
314//===----------------------------------------------------------------------===//
315
316/// An instruction for storing to memory.
317class StoreInst : public Instruction {
318 using VolatileField = BoolBitfieldElementT<0>;
319 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
320 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
321 static_assert(
322 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
323 "Bitfields must be contiguous");
324
325 void AssertOK();
326
327protected:
328 // Note: Instruction needs to be a friend here to call cloneImpl.
329 friend class Instruction;
330
331 StoreInst *cloneImpl() const;
332
333public:
334 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
335 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
336 StoreInst(Value *Val, Value *Ptr, BasicBlock::iterator InsertBefore);
337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
338 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
339 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
340 BasicBlock::iterator InsertBefore);
341 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
342 Instruction *InsertBefore = nullptr);
343 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
344 BasicBlock *InsertAtEnd);
345 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
346 BasicBlock::iterator InsertBefore);
347 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
348 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
349 Instruction *InsertBefore = nullptr);
350 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
351 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
352 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
353 AtomicOrdering Order, SyncScope::ID SSID,
354 BasicBlock::iterator InsertBefore);
355
356 // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
359
360 /// Return true if this is a store to a volatile memory location.
361 bool isVolatile() const { return getSubclassData<VolatileField>(); }
362
363 /// Specify whether this is a volatile store or not.
364 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
365
366 /// Transparently provide more efficient getOperand methods.
367 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
368
369 Align getAlign() const {
370 return Align(1ULL << (getSubclassData<AlignmentField>()));
371 }
372
373 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
375 }
376
377 /// Returns the ordering constraint of this store instruction.
378 AtomicOrdering getOrdering() const {
379 return getSubclassData<OrderingField>();
380 }
381
382 /// Sets the ordering constraint of this store instruction. May not be
383 /// Acquire or AcquireRelease.
384 void setOrdering(AtomicOrdering Ordering) {
385 setSubclassData<OrderingField>(Ordering);
386 }
387
388 /// Returns the synchronization scope ID of this store instruction.
389 SyncScope::ID getSyncScopeID() const {
390 return SSID;
391 }
392
393 /// Sets the synchronization scope ID of this store instruction.
394 void setSyncScopeID(SyncScope::ID SSID) {
395 this->SSID = SSID;
396 }
397
398 /// Sets the ordering constraint and the synchronization scope ID of this
399 /// store instruction.
400 void setAtomic(AtomicOrdering Ordering,
401 SyncScope::ID SSID = SyncScope::System) {
402 setOrdering(Ordering);
403 setSyncScopeID(SSID);
404 }
405
406 bool isSimple() const { return !isAtomic() && !isVolatile(); }
407
408 bool isUnordered() const {
409 return (getOrdering() == AtomicOrdering::NotAtomic ||
410 getOrdering() == AtomicOrdering::Unordered) &&
411 !isVolatile();
412 }
413
414 Value *getValueOperand() { return getOperand(0); }
415 const Value *getValueOperand() const { return getOperand(0); }
416
417 Value *getPointerOperand() { return getOperand(1); }
418 const Value *getPointerOperand() const { return getOperand(1); }
419 static unsigned getPointerOperandIndex() { return 1U; }
420 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
421
422 /// Returns the address space of the pointer operand.
423 unsigned getPointerAddressSpace() const {
424 return getPointerOperandType()->getPointerAddressSpace();
425 }
426
427 // Methods for support type inquiry through isa, cast, and dyn_cast:
428 static bool classof(const Instruction *I) {
429 return I->getOpcode() == Instruction::Store;
430 }
431 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
433 }
434
435private:
436 // Shadow Instruction::setInstructionSubclassData with a private forwarding
437 // method so that subclasses cannot accidentally use it.
438 template <typename Bitfield>
439 void setSubclassData(typename Bitfield::Type Value) {
440 Instruction::setSubclassData<Bitfield>(Value);
441 }
442
443 /// The synchronization scope ID of this store instruction. Not quite enough
444 /// room in SubClassData for everything, so synchronization scope ID gets its
445 /// own field.
446 SyncScope::ID SSID;
447};
448
449template <>
450struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
451};
452
453DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
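
// Illustrative sketch (editorial): a release store built from the constructors
// above; 'Val', 'Ptr', and 'InsertPt' (a BasicBlock::iterator) are assumed to
// exist in the caller.
//
//   StoreInst *SI =
//       new StoreInst(Val, Ptr, /*isVolatile=*/false, Align(4),
//                     AtomicOrdering::Release, SyncScope::System, InsertPt);
//   Value *Stored = SI->getValueOperand();   // operand 0
//   Value *Addr   = SI->getPointerOperand(); // operand 1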
454
455//===----------------------------------------------------------------------===//
456// FenceInst Class
457//===----------------------------------------------------------------------===//
458
459/// An instruction for ordering other memory operations.
460class FenceInst : public Instruction {
461 using OrderingField = AtomicOrderingBitfieldElementT<0>;
462
463 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
464
465protected:
466 // Note: Instruction needs to be a friend here to call cloneImpl.
467 friend class Instruction;
468
469 FenceInst *cloneImpl() const;
470
471public:
472 // Ordering may only be Acquire, Release, AcquireRelease, or
473 // SequentiallyConsistent.
474 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
475 BasicBlock::iterator InsertBefore);
476 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
477 SyncScope::ID SSID = SyncScope::System,
478 Instruction *InsertBefore = nullptr);
479 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
480 BasicBlock *InsertAtEnd);
481
482 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
485
486 /// Returns the ordering constraint of this fence instruction.
487 AtomicOrdering getOrdering() const {
488 return getSubclassData<OrderingField>();
489 }
490
491 /// Sets the ordering constraint of this fence instruction. May only be
492 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
493 void setOrdering(AtomicOrdering Ordering) {
494 setSubclassData<OrderingField>(Ordering);
495 }
496
497 /// Returns the synchronization scope ID of this fence instruction.
498 SyncScope::ID getSyncScopeID() const {
499 return SSID;
500 }
501
502 /// Sets the synchronization scope ID of this fence instruction.
503 void setSyncScopeID(SyncScope::ID SSID) {
504 this->SSID = SSID;
505 }
506
507 // Methods for support type inquiry through isa, cast, and dyn_cast:
508 static bool classof(const Instruction *I) {
509 return I->getOpcode() == Instruction::Fence;
510 }
511 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
513 }
514
515private:
516 // Shadow Instruction::setInstructionSubclassData with a private forwarding
517 // method so that subclasses cannot accidentally use it.
518 template <typename Bitfield>
519 void setSubclassData(typename Bitfield::Type Value) {
520 Instruction::setSubclassData<Bitfield>(Value);
521 }
522
523 /// The synchronization scope ID of this fence instruction. Not quite enough
524 /// room in SubClassData for everything, so synchronization scope ID gets its
525 /// own field.
526 SyncScope::ID SSID;
527};
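
// Illustrative sketch (editorial): fences take no operands, only an ordering
// and an optional synchronization scope. 'Ctx' and 'InsertBefore' are assumed
// to be an existing LLVMContext and Instruction in the caller.
//
//   FenceInst *F = new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
//                                SyncScope::System, InsertBefore);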
528
529//===----------------------------------------------------------------------===//
530// AtomicCmpXchgInst Class
531//===----------------------------------------------------------------------===//
532
533/// An instruction that atomically checks whether a
534/// specified value is in a memory location, and, if it is, stores a new value
535/// there. The value returned by this instruction is a pair containing the
536/// original value as first element, and an i1 indicating success (true) or
537/// failure (false) as second element.
538///
539class AtomicCmpXchgInst : public Instruction {
540 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
541 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
542 SyncScope::ID SSID);
543
544 template <unsigned Offset>
545 using AtomicOrderingBitfieldElement =
546 typename Bitfield::Element<AtomicOrdering, Offset, 3,
547 AtomicOrdering::LAST>;
548
549protected:
550 // Note: Instruction needs to be a friend here to call cloneImpl.
551 friend class Instruction;
552
553 AtomicCmpXchgInst *cloneImpl() const;
554
555public:
556 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
557 AtomicOrdering SuccessOrdering,
558 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
559 BasicBlock::iterator InsertBefore);
560 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
561 AtomicOrdering SuccessOrdering,
562 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
563 Instruction *InsertBefore = nullptr);
564 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
565 AtomicOrdering SuccessOrdering,
566 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
567 BasicBlock *InsertAtEnd);
568
569 // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
572
573 using VolatileField = BoolBitfieldElementT<0>;
574 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
575 using SuccessOrderingField =
576 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
577 using FailureOrderingField =
578 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
579 using AlignmentField =
580 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
581 static_assert(
582 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
583 FailureOrderingField, AlignmentField>(),
584 "Bitfields must be contiguous");
585
586 /// Return the alignment of the memory that is being allocated by the
587 /// instruction.
588 Align getAlign() const {
589 return Align(1ULL << getSubclassData<AlignmentField>());
590 }
591
592 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
594 }
595
596 /// Return true if this is a cmpxchg from a volatile memory
597 /// location.
598 ///
599 bool isVolatile() const { return getSubclassData<VolatileField>(); }
600
601 /// Specify whether this is a volatile cmpxchg.
602 ///
603 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
604
605 /// Return true if this cmpxchg may spuriously fail.
606 bool isWeak() const { return getSubclassData<WeakField>(); }
607
608 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
609
610 /// Transparently provide more efficient getOperand methods.
611 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
612
613 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
614 return Ordering != AtomicOrdering::NotAtomic &&
615 Ordering != AtomicOrdering::Unordered;
616 }
617
618 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
619 return Ordering != AtomicOrdering::NotAtomic &&
620 Ordering != AtomicOrdering::Unordered &&
621 Ordering != AtomicOrdering::AcquireRelease &&
622 Ordering != AtomicOrdering::Release;
623 }
624
625 /// Returns the success ordering constraint of this cmpxchg instruction.
626 AtomicOrdering getSuccessOrdering() const {
627 return getSubclassData<SuccessOrderingField>();
628 }
629
630 /// Sets the success ordering constraint of this cmpxchg instruction.
631 void setSuccessOrdering(AtomicOrdering Ordering) {
632 assert(isValidSuccessOrdering(Ordering) &&
633 "invalid CmpXchg success ordering");
634 setSubclassData<SuccessOrderingField>(Ordering);
635 }
636
637 /// Returns the failure ordering constraint of this cmpxchg instruction.
638 AtomicOrdering getFailureOrdering() const {
639 return getSubclassData<FailureOrderingField>();
640 }
641
642 /// Sets the failure ordering constraint of this cmpxchg instruction.
643 void setFailureOrdering(AtomicOrdering Ordering) {
644 assert(isValidFailureOrdering(Ordering) &&
645 "invalid CmpXchg failure ordering");
646 setSubclassData<FailureOrderingField>(Ordering);
647 }
648
649 /// Returns a single ordering which is at least as strong as both the
650 /// success and failure orderings for this cmpxchg.
651 AtomicOrdering getMergedOrdering() const {
652 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
653 return AtomicOrdering::SequentiallyConsistent;
654 if (getFailureOrdering() == AtomicOrdering::Acquire) {
655 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
656 return AtomicOrdering::Acquire;
657 if (getSuccessOrdering() == AtomicOrdering::Release)
658 return AtomicOrdering::AcquireRelease;
659 }
660 return getSuccessOrdering();
661 }
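
  // Worked example (editorial): with a Release success ordering and an Acquire
  // failure ordering, neither ordering includes the other, so the merge above
  // returns AcquireRelease; with a Monotonic failure ordering it simply
  // returns the success ordering.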
662
663 /// Returns the synchronization scope ID of this cmpxchg instruction.
664 SyncScope::ID getSyncScopeID() const {
665 return SSID;
666 }
667
668 /// Sets the synchronization scope ID of this cmpxchg instruction.
669 void setSyncScopeID(SyncScope::ID SSID) {
670 this->SSID = SSID;
671 }
672
673 Value *getPointerOperand() { return getOperand(0); }
674 const Value *getPointerOperand() const { return getOperand(0); }
675 static unsigned getPointerOperandIndex() { return 0U; }
676
677 Value *getCompareOperand() { return getOperand(1); }
678 const Value *getCompareOperand() const { return getOperand(1); }
679
680 Value *getNewValOperand() { return getOperand(2); }
681 const Value *getNewValOperand() const { return getOperand(2); }
682
683 /// Returns the address space of the pointer operand.
684 unsigned getPointerAddressSpace() const {
685 return getPointerOperand()->getType()->getPointerAddressSpace();
686 }
687
688 /// Returns the strongest permitted ordering on failure, given the
689 /// desired ordering on success.
690 ///
691 /// If the comparison in a cmpxchg operation fails, there is no atomic store
692 /// so release semantics cannot be provided. So this function drops explicit
693 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
694 /// operation would remain SequentiallyConsistent.
695 static AtomicOrdering
696 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
697 switch (SuccessOrdering) {
698 default:
699 llvm_unreachable("invalid cmpxchg success ordering");
700 case AtomicOrdering::Release:
701 case AtomicOrdering::Monotonic:
702 return AtomicOrdering::Monotonic;
703 case AtomicOrdering::AcquireRelease:
704 case AtomicOrdering::Acquire:
705 return AtomicOrdering::Acquire;
706 case AtomicOrdering::SequentiallyConsistent:
707 return AtomicOrdering::SequentiallyConsistent;
708 }
709 }
710
711 // Methods for support type inquiry through isa, cast, and dyn_cast:
712 static bool classof(const Instruction *I) {
713 return I->getOpcode() == Instruction::AtomicCmpXchg;
714 }
715 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
717 }
718
719private:
720 // Shadow Instruction::setInstructionSubclassData with a private forwarding
721 // method so that subclasses cannot accidentally use it.
722 template <typename Bitfield>
723 void setSubclassData(typename Bitfield::Type Value) {
724 Instruction::setSubclassData<Bitfield>(Value);
725 }
726
727 /// The synchronization scope ID of this cmpxchg instruction. Not quite
728 /// enough room in SubClassData for everything, so synchronization scope ID
729 /// gets its own field.
730 SyncScope::ID SSID;
731};
732
733template <>
734struct OperandTraits<AtomicCmpXchgInst> :
735 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
736};
737
738DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
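
// Illustrative sketch (editorial): a strong cmpxchg whose failure ordering is
// derived from the success ordering via the helper above. 'Ptr', 'Expected',
// 'Desired', and 'InsertPt' (a BasicBlock::iterator) are assumed to exist in
// the caller.
//
//   AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
//   AtomicOrdering Failure =
//       AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
//   auto *CXI = new AtomicCmpXchgInst(Ptr, Expected, Desired, Align(8),
//                                     Success, Failure, SyncScope::System,
//                                     InsertPt);
//   CXI->setWeak(false); // a strong cmpxchg may not fail spuriously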
739
740//===----------------------------------------------------------------------===//
741// AtomicRMWInst Class
742//===----------------------------------------------------------------------===//
743
/// An instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
747///
748class AtomicRMWInst : public Instruction {
749protected:
750 // Note: Instruction needs to be a friend here to call cloneImpl.
751 friend class Instruction;
752
753 AtomicRMWInst *cloneImpl() const;
754
755public:
756 /// This enumeration lists the possible modifications atomicrmw can make. In
757 /// the descriptions, 'p' is the pointer to the instruction's memory location,
758 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
759 /// instruction. These instructions always return 'old'.
760 enum BinOp : unsigned {
761 /// *p = v
762 Xchg,
763 /// *p = old + v
764 Add,
765 /// *p = old - v
766 Sub,
767 /// *p = old & v
768 And,
769 /// *p = ~(old & v)
770 Nand,
771 /// *p = old | v
772 Or,
773 /// *p = old ^ v
774 Xor,
775 /// *p = old >signed v ? old : v
776 Max,
777 /// *p = old <signed v ? old : v
778 Min,
779 /// *p = old >unsigned v ? old : v
780 UMax,
781 /// *p = old <unsigned v ? old : v
782 UMin,
783
784 /// *p = old + v
785 FAdd,
786
787 /// *p = old - v
788 FSub,
789
790 /// *p = maxnum(old, v)
791 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
792 FMax,
793
794 /// *p = minnum(old, v)
795 /// \p minnum matches the behavior of \p llvm.minnum.*.
796 FMin,
797
798 /// Increment one up to a maximum value.
799 /// *p = (old u>= v) ? 0 : (old + 1)
800 UIncWrap,
801
802 /// Decrement one until a minimum value or zero.
803 /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
804 UDecWrap,
805
806 FIRST_BINOP = Xchg,
807 LAST_BINOP = UDecWrap,
808 BAD_BINOP
809 };
810
811private:
812 template <unsigned Offset>
813 using AtomicOrderingBitfieldElement =
814 typename Bitfield::Element<AtomicOrdering, Offset, 3,
815 AtomicOrdering::LAST>;
816
817 template <unsigned Offset>
818 using BinOpBitfieldElement =
819 typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;
820
821public:
822 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
823 AtomicOrdering Ordering, SyncScope::ID SSID,
824 BasicBlock::iterator InsertBefore);
825 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
826 AtomicOrdering Ordering, SyncScope::ID SSID,
827 Instruction *InsertBefore = nullptr);
828 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
829 AtomicOrdering Ordering, SyncScope::ID SSID,
830 BasicBlock *InsertAtEnd);
831
832 // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
835
836 using VolatileField = BoolBitfieldElementT<0>;
837 using AtomicOrderingField =
838 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
839 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
840 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
841 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
842 OperationField, AlignmentField>(),
843 "Bitfields must be contiguous");
844
845 BinOp getOperation() const { return getSubclassData<OperationField>(); }
846
847 static StringRef getOperationName(BinOp Op);
848
849 static bool isFPOperation(BinOp Op) {
850 switch (Op) {
851 case AtomicRMWInst::FAdd:
852 case AtomicRMWInst::FSub:
853 case AtomicRMWInst::FMax:
854 case AtomicRMWInst::FMin:
855 return true;
856 default:
857 return false;
858 }
859 }
860
861 void setOperation(BinOp Operation) {
862 setSubclassData<OperationField>(Operation);
863 }
864
865 /// Return the alignment of the memory that is being allocated by the
866 /// instruction.
867 Align getAlign() const {
868 return Align(1ULL << getSubclassData<AlignmentField>());
869 }
870
871 void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
873 }
874
875 /// Return true if this is a RMW on a volatile memory location.
876 ///
877 bool isVolatile() const { return getSubclassData<VolatileField>(); }
878
879 /// Specify whether this is a volatile RMW or not.
880 ///
881 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
882
883 /// Transparently provide more efficient getOperand methods.
884 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
885
886 /// Returns the ordering constraint of this rmw instruction.
887 AtomicOrdering getOrdering() const {
888 return getSubclassData<AtomicOrderingField>();
889 }
890
891 /// Sets the ordering constraint of this rmw instruction.
892 void setOrdering(AtomicOrdering Ordering) {
893 assert(Ordering != AtomicOrdering::NotAtomic &&
894 "atomicrmw instructions can only be atomic.");
895 assert(Ordering != AtomicOrdering::Unordered &&
896 "atomicrmw instructions cannot be unordered.");
897 setSubclassData<AtomicOrderingField>(Ordering);
898 }
899
900 /// Returns the synchronization scope ID of this rmw instruction.
901 SyncScope::ID getSyncScopeID() const {
902 return SSID;
903 }
904
905 /// Sets the synchronization scope ID of this rmw instruction.
906 void setSyncScopeID(SyncScope::ID SSID) {
907 this->SSID = SSID;
908 }
909
910 Value *getPointerOperand() { return getOperand(0); }
911 const Value *getPointerOperand() const { return getOperand(0); }
912 static unsigned getPointerOperandIndex() { return 0U; }
913
914 Value *getValOperand() { return getOperand(1); }
915 const Value *getValOperand() const { return getOperand(1); }
916
917 /// Returns the address space of the pointer operand.
918 unsigned getPointerAddressSpace() const {
919 return getPointerOperand()->getType()->getPointerAddressSpace();
920 }
921
922 bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
924 }
925
926 // Methods for support type inquiry through isa, cast, and dyn_cast:
927 static bool classof(const Instruction *I) {
928 return I->getOpcode() == Instruction::AtomicRMW;
929 }
930 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
932 }
933
934private:
935 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
936 AtomicOrdering Ordering, SyncScope::ID SSID);
937
938 // Shadow Instruction::setInstructionSubclassData with a private forwarding
939 // method so that subclasses cannot accidentally use it.
940 template <typename Bitfield>
941 void setSubclassData(typename Bitfield::Type Value) {
942 Instruction::setSubclassData<Bitfield>(Value);
943 }
944
945 /// The synchronization scope ID of this rmw instruction. Not quite enough
946 /// room in SubClassData for everything, so synchronization scope ID gets its
947 /// own field.
948 SyncScope::ID SSID;
949};
950
951template <>
952struct OperandTraits<AtomicRMWInst>
953 : public FixedNumOperandTraits<AtomicRMWInst,2> {
954};
955
956DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
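
// Illustrative sketch (editorial): an atomic fetch-and-add. The instruction
// yields the value that was in memory before the addition. 'Ptr', 'Delta',
// and 'InsertPt' (a BasicBlock::iterator) are assumed to exist in the caller.
//
//   auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Delta, Align(4),
//                                 AtomicOrdering::SequentiallyConsistent,
//                                 SyncScope::System, InsertPt);
//   Value *OldValue = RMW; // the result of atomicrmw is the prior contents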
957
958//===----------------------------------------------------------------------===//
959// GetElementPtrInst Class
960//===----------------------------------------------------------------------===//
961
962// checkGEPType - Simple wrapper function to give a better assertion failure
963// message on bad indexes for a gep instruction.
964//
965inline Type *checkGEPType(Type *Ty) {
966 assert(Ty && "Invalid GetElementPtrInst indices for type!");
967 return Ty;
968}
969
/// An instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs.
972///
973class GetElementPtrInst : public Instruction {
974 Type *SourceElementType;
975 Type *ResultElementType;
976
977 GetElementPtrInst(const GetElementPtrInst &GEPI);
978
  /// Constructors - Create a getelementptr instruction with a base pointer and
  /// a list of indices. The first and second ctors can optionally insert
  /// before an existing instruction; the third appends the new instruction to
  /// the specified BasicBlock.
983 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
984 ArrayRef<Value *> IdxList, unsigned Values,
985 const Twine &NameStr,
986 BasicBlock::iterator InsertBefore);
987 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
988 ArrayRef<Value *> IdxList, unsigned Values,
989 const Twine &NameStr, Instruction *InsertBefore);
990 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
991 ArrayRef<Value *> IdxList, unsigned Values,
992 const Twine &NameStr, BasicBlock *InsertAtEnd);
993
994 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
995
996protected:
997 // Note: Instruction needs to be a friend here to call cloneImpl.
998 friend class Instruction;
999
1000 GetElementPtrInst *cloneImpl() const;
1001
1002public:
1003 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1004 ArrayRef<Value *> IdxList,
1005 const Twine &NameStr,
1006 BasicBlock::iterator InsertBefore) {
1007 unsigned Values = 1 + unsigned(IdxList.size());
1008 assert(PointeeType && "Must specify element type");
1009 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1010 NameStr, InsertBefore);
1011 }
1012
1013 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1014 ArrayRef<Value *> IdxList,
1015 const Twine &NameStr = "",
1016 Instruction *InsertBefore = nullptr) {
1017 unsigned Values = 1 + unsigned(IdxList.size());
1018 assert(PointeeType && "Must specify element type");
1019 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1020 NameStr, InsertBefore);
1021 }
1022
1023 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
1024 ArrayRef<Value *> IdxList,
1025 const Twine &NameStr,
1026 BasicBlock *InsertAtEnd) {
1027 unsigned Values = 1 + unsigned(IdxList.size());
1028 assert(PointeeType && "Must specify element type");
1029 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
1030 NameStr, InsertAtEnd);
1031 }
1032
1033 /// Create an "inbounds" getelementptr. See the documentation for the
1034 /// "inbounds" flag in LangRef.html for details.
1035 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1036 ArrayRef<Value *> IdxList,
1037 const Twine &NameStr,
1038 BasicBlock::iterator InsertBefore) {
1039 GetElementPtrInst *GEP =
1040 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1041 GEP->setIsInBounds(true);
1042 return GEP;
1043 }
1044
1045 static GetElementPtrInst *
1046 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
1047 const Twine &NameStr = "",
1048 Instruction *InsertBefore = nullptr) {
1049 GetElementPtrInst *GEP =
1050 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1051 GEP->setIsInBounds(true);
1052 return GEP;
1053 }
1054
1055 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1056 ArrayRef<Value *> IdxList,
1057 const Twine &NameStr,
1058 BasicBlock *InsertAtEnd) {
1059 GetElementPtrInst *GEP =
1060 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1061 GEP->setIsInBounds(true);
1062 return GEP;
1063 }
1064
1065 /// Transparently provide more efficient getOperand methods.
1066 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1067
1068 Type *getSourceElementType() const { return SourceElementType; }
1069
1070 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1071 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1072
1073 Type *getResultElementType() const {
1074 return ResultElementType;
1075 }
1076
1077 /// Returns the address space of this instruction's pointer type.
1078 unsigned getAddressSpace() const {
1079 // Note that this is always the same as the pointer operand's address space
1080 // and that is cheaper to compute, so cheat here.
1081 return getPointerAddressSpace();
1082 }
1083
1084 /// Returns the result type of a getelementptr with the given source
1085 /// element type and indexes.
1086 ///
1087 /// Null is returned if the indices are invalid for the specified
1088 /// source element type.
1089 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1090 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1091 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1092
1093 /// Return the type of the element at the given index of an indexable
1094 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1095 ///
1096 /// Returns null if the type can't be indexed, or the given index is not
1097 /// legal for the given type.
1098 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1099 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1100
1101 inline op_iterator idx_begin() { return op_begin()+1; }
1102 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1103 inline op_iterator idx_end() { return op_end(); }
1104 inline const_op_iterator idx_end() const { return op_end(); }
1105
1106 inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
1108 }
1109
1110 inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
1112 }
1113
1114 Value *getPointerOperand() {
1115 return getOperand(0);
1116 }
1117 const Value *getPointerOperand() const {
1118 return getOperand(0);
1119 }
1120 static unsigned getPointerOperandIndex() {
1121 return 0U; // get index for modifying correct operand.
1122 }
1123
1124 /// Method to return the pointer operand as a
1125 /// PointerType.
1126 Type *getPointerOperandType() const {
1127 return getPointerOperand()->getType();
1128 }
1129
1130 /// Returns the address space of the pointer operand.
1131 unsigned getPointerAddressSpace() const {
1132 return getPointerOperandType()->getPointerAddressSpace();
1133 }
1134
1135 /// Returns the pointer type returned by the GEP
1136 /// instruction, which may be a vector of pointers.
1137 static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
1138 // Vector GEP
1139 Type *Ty = Ptr->getType();
1140 if (Ty->isVectorTy())
1141 return Ty;
1142
1143 for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(Ty, EltCount);
1147 }
1148 // Scalar GEP
1149 return Ty;
1150 }
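
  // Worked example (editorial): for a scalar pointer base with one index of
  // type <4 x i64>, the loop above widens the result, so getGEPReturnType
  // yields <4 x ptr>; a scalar base with all-scalar indices simply returns the
  // base pointer type unchanged.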
1151
1152 unsigned getNumIndices() const { // Note: always non-negative
1153 return getNumOperands() - 1;
1154 }
1155
1156 bool hasIndices() const {
1157 return getNumOperands() > 1;
1158 }
1159
1160 /// Return true if all of the indices of this GEP are
1161 /// zeros. If so, the result pointer and the first operand have the same
1162 /// value, just potentially different types.
1163 bool hasAllZeroIndices() const;
1164
1165 /// Return true if all of the indices of this GEP are
1166 /// constant integers. If so, the result pointer and the first operand have
1167 /// a constant offset between them.
1168 bool hasAllConstantIndices() const;
1169
1170 /// Set or clear the inbounds flag on this GEP instruction.
1171 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1172 void setIsInBounds(bool b = true);
1173
1174 /// Determine whether the GEP has the inbounds flag.
1175 bool isInBounds() const;
1176
1177 /// Accumulate the constant address offset of this GEP if possible.
1178 ///
1179 /// This routine accepts an APInt into which it will accumulate the constant
1180 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1181 /// all-constant, it returns false and the value of the offset APInt is
1182 /// undefined (it is *not* preserved!). The APInt passed into this routine
1183 /// must be at least as wide as the IntPtr type for the address space of
1184 /// the base GEP pointer.
1185 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1186 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1187 MapVector<Value *, APInt> &VariableOffsets,
1188 APInt &ConstantOffset) const;
1189 // Methods for support type inquiry through isa, cast, and dyn_cast:
1190 static bool classof(const Instruction *I) {
1191 return (I->getOpcode() == Instruction::GetElementPtr);
1192 }
1193 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1195 }
1196};
1197
1198template <>
1199struct OperandTraits<GetElementPtrInst> :
1200 public VariadicOperandTraits<GetElementPtrInst, 1> {
1201};
1202
1203GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1204 ArrayRef<Value *> IdxList, unsigned Values,
1205 const Twine &NameStr,
1206 BasicBlock::iterator InsertBefore)
1207 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1209 Values, InsertBefore),
1210 SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
1212 init(Ptr, IdxList, NameStr);
1213}
1214
1215GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1216 ArrayRef<Value *> IdxList, unsigned Values,
1217 const Twine &NameStr,
1218 Instruction *InsertBefore)
1219 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1221 Values, InsertBefore),
1222 SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
1224 init(Ptr, IdxList, NameStr);
1225}
1226
1227GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1228 ArrayRef<Value *> IdxList, unsigned Values,
1229 const Twine &NameStr,
1230 BasicBlock *InsertAtEnd)
1231 : Instruction(getGEPReturnType(Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1233 Values, InsertAtEnd),
1234 SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
1236 init(Ptr, IdxList, NameStr);
1237}
1238
1239DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
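
// Illustrative sketch (editorial): building an inbounds GEP to a struct field
// and folding its constant offset with the accessors above. 'STy' (a
// StructType), 'Base', 'Ctx', 'DL', and 'InsertPt' (a BasicBlock::iterator)
// are assumed to exist in the caller.
//
//   Value *Idx[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 0),
//                   ConstantInt::get(Type::getInt32Ty(Ctx), 1)};
//   GetElementPtrInst *GEP =
//       GetElementPtrInst::CreateInBounds(STy, Base, Idx, "field", InsertPt);
//   APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
//   bool HasConstOffset = GEP->accumulateConstantOffset(DL, Offset);
//   // On success, Offset holds the byte offset of field 1 within STy.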
1240
1241//===----------------------------------------------------------------------===//
1242// ICmpInst Class
1243//===----------------------------------------------------------------------===//
1244
1245/// This instruction compares its operands according to the predicate given
1246/// to the constructor. It only operates on integers or pointers. The operands
1247/// must be identical types.
1248/// Represent an integer comparison operator.
1249class ICmpInst: public CmpInst {
1250 void AssertOK() {
1251 assert(isIntPredicate() &&
1252 "Invalid ICmp predicate value");
1253 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1254 "Both operands to ICmp instruction are not of the same type!");
1255 // Check that the operands are the right type
1256 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1257 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1258 "Invalid operand types for ICmp instruction");
1259 }
1260
1261protected:
1262 // Note: Instruction needs to be a friend here to call cloneImpl.
1263 friend class Instruction;
1264
1265 /// Clone an identical ICmpInst
1266 ICmpInst *cloneImpl() const;
1267
1268public:
1269 /// Constructor with insert-before-instruction semantics.
1270 ICmpInst(
1271 BasicBlock::iterator InsertBefore, ///< Where to insert
1272 Predicate pred, ///< The predicate to use for the comparison
1273 Value *LHS, ///< The left-hand-side of the expression
1274 Value *RHS, ///< The right-hand-side of the expression
1275 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1277 Instruction::ICmp, pred, LHS, RHS, NameStr,
1278 InsertBefore) {
1279#ifndef NDEBUG
1280 AssertOK();
1281#endif
1282 }
1283
1284 /// Constructor with insert-before-instruction semantics.
1285 ICmpInst(
1286 Instruction *InsertBefore, ///< Where to insert
1287 Predicate pred, ///< The predicate to use for the comparison
1288 Value *LHS, ///< The left-hand-side of the expression
1289 Value *RHS, ///< The right-hand-side of the expression
1290 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1292 Instruction::ICmp, pred, LHS, RHS, NameStr,
1293 InsertBefore) {
1294#ifndef NDEBUG
1295 AssertOK();
1296#endif
1297 }
1298
1299 /// Constructor with insert-at-end semantics.
1300 ICmpInst(
1301 BasicBlock *InsertAtEnd, ///< Block to insert into.
1302 Predicate pred, ///< The predicate to use for the comparison
1303 Value *LHS, ///< The left-hand-side of the expression
1304 Value *RHS, ///< The right-hand-side of the expression
1305 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1307 Instruction::ICmp, pred, LHS, RHS, NameStr,
1308 InsertAtEnd) {
1309#ifndef NDEBUG
1310 AssertOK();
1311#endif
1312 }
1313
1314 /// Constructor with no-insertion semantics
1315 ICmpInst(
1316 Predicate pred, ///< The predicate to use for the comparison
1317 Value *LHS, ///< The left-hand-side of the expression
1318 Value *RHS, ///< The right-hand-side of the expression
1319 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1321 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1322#ifndef NDEBUG
1323 AssertOK();
1324#endif
1325 }
1326
1327 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1328 /// @returns the predicate that would be the result if the operand were
1329 /// regarded as signed.
1330 /// Return the signed version of the predicate
1331 Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
1333 }
1334
1335 /// This is a static version that you can use without an instruction.
1336 /// Return the signed version of the predicate.
1337 static Predicate getSignedPredicate(Predicate pred);
1338
1339 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1340 /// @returns the predicate that would be the result if the operand were
1341 /// regarded as unsigned.
1342 /// Return the unsigned version of the predicate
1343 Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
1345 }
1346
1347 /// This is a static version that you can use without an instruction.
1348 /// Return the unsigned version of the predicate.
1349 static Predicate getUnsignedPredicate(Predicate pred);
1350
1351 /// Return true if this predicate is either EQ or NE. This also
1352 /// tests for commutativity.
1353 static bool isEquality(Predicate P) {
1354 return P == ICMP_EQ || P == ICMP_NE;
1355 }
1356
1357 /// Return true if this predicate is either EQ or NE. This also
1358 /// tests for commutativity.
1359 bool isEquality() const {
    return isEquality(getPredicate());
1361 }
1362
1363 /// @returns true if the predicate of this ICmpInst is commutative
1364 /// Determine if this relation is commutative.
1365 bool isCommutative() const { return isEquality(); }
1366
1367 /// Return true if the predicate is relational (not EQ or NE).
1368 ///
1369 bool isRelational() const {
1370 return !isEquality();
1371 }
1372
1373 /// Return true if the predicate is relational (not EQ or NE).
1374 ///
1375 static bool isRelational(Predicate P) {
1376 return !isEquality(P);
1377 }
1378
1379 /// Return true if the predicate is SGT or UGT.
1380 ///
1381 static bool isGT(Predicate P) {
1382 return P == ICMP_SGT || P == ICMP_UGT;
1383 }
1384
1385 /// Return true if the predicate is SLT or ULT.
1386 ///
1387 static bool isLT(Predicate P) {
1388 return P == ICMP_SLT || P == ICMP_ULT;
1389 }
1390
1391 /// Return true if the predicate is SGE or UGE.
1392 ///
1393 static bool isGE(Predicate P) {
1394 return P == ICMP_SGE || P == ICMP_UGE;
1395 }
1396
1397 /// Return true if the predicate is SLE or ULE.
1398 ///
1399 static bool isLE(Predicate P) {
1400 return P == ICMP_SLE || P == ICMP_ULE;
1401 }
1402
1403 /// Returns the sequence of all ICmp predicates.
1404 ///
1405 static auto predicates() { return ICmpPredicates(); }
1406
1407 /// Exchange the two operands to this instruction in such a way that it does
1408 /// not modify the semantics of the instruction. The predicate value may be
1409 /// changed to retain the same result if the predicate is order dependent
1410 /// (e.g. ult).
1411 /// Swap operands and adjust predicate.
1412 void swapOperands() {
1413 setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
1415 }
1416
1417 /// Return result of `LHS Pred RHS` comparison.
1418 static bool compare(const APInt &LHS, const APInt &RHS,
1419 ICmpInst::Predicate Pred);
1420
1421 // Methods for support type inquiry through isa, cast, and dyn_cast:
1422 static bool classof(const Instruction *I) {
1423 return I->getOpcode() == Instruction::ICmp;
1424 }
1425 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
1427 }
1428};
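
// Illustrative sketch (editorial): an unsigned integer comparison and the
// constant-folding helper declared above. 'A' and 'B' are assumed to be
// existing i32 Values, and 'X' and 'Y' existing APInts of equal width.
//
//   ICmpInst *Cmp = new ICmpInst(ICmpInst::ICMP_ULT, A, B, "cmp");
//   ICmpInst::Predicate Signed = Cmp->getSignedPredicate(); // ICMP_SLT
//   bool Res = ICmpInst::compare(X, Y, ICmpInst::ICMP_ULT);  // X u< Y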
1429
1430//===----------------------------------------------------------------------===//
1431// FCmpInst Class
1432//===----------------------------------------------------------------------===//
1433
1434/// This instruction compares its operands according to the predicate given
1435/// to the constructor. It only operates on floating point values or packed
1436/// vectors of floating point values. The operands must be identical types.
1437/// Represents a floating point comparison operator.
1438class FCmpInst: public CmpInst {
1439 void AssertOK() {
1440 assert(isFPPredicate() && "Invalid FCmp predicate value");
1441 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1442 "Both operands to FCmp instruction are not of the same type!");
1443 // Check that the operands are the right type
1444 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1445 "Invalid operand types for FCmp instruction");
1446 }
1447
1448protected:
1449 // Note: Instruction needs to be a friend here to call cloneImpl.
1450 friend class Instruction;
1451
1452 /// Clone an identical FCmpInst
1453 FCmpInst *cloneImpl() const;
1454
1455public:
1456 /// Constructor with insert-before-instruction semantics.
1457 FCmpInst(
1458 BasicBlock::iterator InsertBefore, ///< Where to insert
1459 Predicate pred, ///< The predicate to use for the comparison
1460 Value *LHS, ///< The left-hand-side of the expression
1461 Value *RHS, ///< The right-hand-side of the expression
1462 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1464 Instruction::FCmp, pred, LHS, RHS, NameStr,
1465 InsertBefore) {
1466 AssertOK();
1467 }
1468
1469 /// Constructor with insert-before-instruction semantics.
1470 FCmpInst(
1471 Instruction *InsertBefore, ///< Where to insert
1472 Predicate pred, ///< The predicate to use for the comparison
1473 Value *LHS, ///< The left-hand-side of the expression
1474 Value *RHS, ///< The right-hand-side of the expression
1475 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1477 Instruction::FCmp, pred, LHS, RHS, NameStr,
1478 InsertBefore) {
1479 AssertOK();
1480 }
1481
1482 /// Constructor with insert-at-end semantics.
1483 FCmpInst(
1484 BasicBlock *InsertAtEnd, ///< Block to insert into.
1485 Predicate pred, ///< The predicate to use for the comparison
1486 Value *LHS, ///< The left-hand-side of the expression
1487 Value *RHS, ///< The right-hand-side of the expression
1488 const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
1490 Instruction::FCmp, pred, LHS, RHS, NameStr,
1491 InsertAtEnd) {
1492 AssertOK();
1493 }
1494
1495 /// Constructor with no-insertion semantics
1496 FCmpInst(
1497 Predicate Pred, ///< The predicate to use for the comparison
1498 Value *LHS, ///< The left-hand-side of the expression
1499 Value *RHS, ///< The right-hand-side of the expression
1500 const Twine &NameStr = "", ///< Name of the instruction
1501 Instruction *FlagsSource = nullptr
1502 ) : CmpInst(makeCmpResultType(opnd_type: LHS->getType()), Instruction::FCmp, Pred, LHS,
1503 RHS, NameStr, nullptr, FlagsSource) {
1504 AssertOK();
1505 }
1506
1507 /// @returns true if the predicate of this instruction is EQ or NE.
1508 /// Determine if this is an equality predicate.
1509 static bool isEquality(Predicate Pred) {
1510 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1511 Pred == FCMP_UNE;
1512 }
1513
1514 /// @returns true if the predicate of this instruction is EQ or NE.
1515 /// Determine if this is an equality predicate.
1516 bool isEquality() const { return isEquality(Pred: getPredicate()); }
1517
1518 /// @returns true if the predicate of this instruction is commutative.
1519 /// Determine if this is a commutative predicate.
1520 bool isCommutative() const {
1521 return isEquality() ||
1522 getPredicate() == FCMP_FALSE ||
1523 getPredicate() == FCMP_TRUE ||
1524 getPredicate() == FCMP_ORD ||
1525 getPredicate() == FCMP_UNO;
1526 }
1527
1528 /// @returns true if the predicate is relational (not EQ or NE).
1529 /// Determine if this a relational predicate.
1530 bool isRelational() const { return !isEquality(); }
1531
1532 /// Exchange the two operands to this instruction in such a way that it does
1533 /// not modify the semantics of the instruction. The predicate value may be
1534 /// changed to retain the same result if the predicate is order dependent
1535 /// (e.g. olt).
1536 /// Swap operands and adjust predicate.
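/// For illustration (hypothetical values %x and %y):
/// \code
///   %c = fcmp olt double %x, %y   ; before swapOperands()
///   %c = fcmp ogt double %y, %x   ; after
/// \endcode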
1537 void swapOperands() {
1538 setPredicate(getSwappedPredicate());
1539 Op<0>().swap(RHS&: Op<1>());
1540 }
1541
1542 /// Returns the sequence of all FCmp predicates.
1543 ///
1544 static auto predicates() { return FCmpPredicates(); }
1545
1546 /// Return result of `LHS Pred RHS` comparison.
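/// A small sketch of how ordered and unordered predicates differ on NaN:
/// \code
///   APFloat NaN = APFloat::getNaN(APFloat::IEEEdouble());
///   FCmpInst::compare(APFloat(1.0), NaN, FCmpInst::FCMP_OEQ); // false (ordered)
///   FCmpInst::compare(APFloat(1.0), NaN, FCmpInst::FCMP_UEQ); // true (unordered)
/// \endcode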
1547 static bool compare(const APFloat &LHS, const APFloat &RHS,
1548 FCmpInst::Predicate Pred);
1549
1550 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1551 static bool classof(const Instruction *I) {
1552 return I->getOpcode() == Instruction::FCmp;
1553 }
1554 static bool classof(const Value *V) {
1555 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1556 }
1557};
1558
1559//===----------------------------------------------------------------------===//
1560/// This class represents a function call, abstracting a target
1561 /// machine's calling convention. This class uses the low bit of the SubClassData
1562/// field to indicate whether or not this is a tail call. The rest of the bits
1563/// hold the calling convention of the call.
1564///
1565class CallInst : public CallBase {
1566 CallInst(const CallInst &CI);
1567
1568 /// Construct a CallInst from a range of arguments
1569 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1571 BasicBlock::iterator InsertBefore);
1572
1573 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1574 const Twine &NameStr, BasicBlock::iterator InsertBefore)
1575 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1576
1577 /// Construct a CallInst from a range of arguments together with a list
1578 /// of operand bundles.
1579 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1580 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1581 Instruction *InsertBefore);
1582
1583 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1584 const Twine &NameStr, Instruction *InsertBefore)
1585 : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {}
1586
1587 /// Construct a CallInst from a range of arguments together with a list
1588 /// of operand bundles.
1589 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1590 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1591 BasicBlock *InsertAtEnd);
1592
1593 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1594 BasicBlock::iterator InsertBefore);
1595
1596 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1597 Instruction *InsertBefore);
1598
1599 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1600 BasicBlock *InsertAtEnd);
1601
1602 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1603 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1604 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1605
1606 /// Compute the number of operands to allocate.
1607 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1608 // We need one operand for the called function, plus the input operand
1609 // counts provided.
1610 return 1 + NumArgs + NumBundleInputs;
1611 }
1612
1613protected:
1614 // Note: Instruction needs to be a friend here to call cloneImpl.
1615 friend class Instruction;
1616
1617 CallInst *cloneImpl() const;
1618
1619public:
1620 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1621 BasicBlock::iterator InsertBefore) {
1622 return new (ComputeNumOperands(NumArgs: 0)) CallInst(Ty, F, NameStr, InsertBefore);
1623 }
1624
1625 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1626 Instruction *InsertBefore = nullptr) {
1627 return new (ComputeNumOperands(NumArgs: 0)) CallInst(Ty, F, NameStr, InsertBefore);
1628 }
1629
1630 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1631 const Twine &NameStr,
1632 BasicBlock::iterator InsertBefore) {
1633 return new (ComputeNumOperands(NumArgs: Args.size()))
1634 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1635 }
1636
1637 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1638 const Twine &NameStr,
1639 Instruction *InsertBefore = nullptr) {
1640 return new (ComputeNumOperands(NumArgs: Args.size()))
1641 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore);
1642 }
1643
1644 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1645 ArrayRef<OperandBundleDef> Bundles,
1646 const Twine &NameStr,
1647 BasicBlock::iterator InsertBefore) {
1648 const int NumOperands =
1649 ComputeNumOperands(NumArgs: Args.size(), NumBundleInputs: CountBundleInputs(Bundles));
1650 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1651
1652 return new (NumOperands, DescriptorBytes)
1653 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1654 }
1655
1656 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1657 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1658 const Twine &NameStr = "",
1659 Instruction *InsertBefore = nullptr) {
1660 const int NumOperands =
1661 ComputeNumOperands(NumArgs: Args.size(), NumBundleInputs: CountBundleInputs(Bundles));
1662 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1663
1664 return new (NumOperands, DescriptorBytes)
1665 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1666 }
1667
1668 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1669 BasicBlock *InsertAtEnd) {
1670 return new (ComputeNumOperands(NumArgs: 0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1671 }
1672
1673 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1674 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1675 return new (ComputeNumOperands(NumArgs: Args.size()))
1676 CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd);
1677 }
1678
1679 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1680 ArrayRef<OperandBundleDef> Bundles,
1681 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1682 const int NumOperands =
1683 ComputeNumOperands(NumArgs: Args.size(), NumBundleInputs: CountBundleInputs(Bundles));
1684 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1685
1686 return new (NumOperands, DescriptorBytes)
1687 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1688 }
1689
1690 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1691 BasicBlock::iterator InsertBefore) {
1692 return Create(Ty: Func.getFunctionType(), F: Func.getCallee(), NameStr,
1693 InsertBefore);
1694 }
1695
1696 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1697 Instruction *InsertBefore = nullptr) {
1698 return Create(Ty: Func.getFunctionType(), F: Func.getCallee(), NameStr,
1699 InsertBefore);
1700 }
1701
1702 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1703 ArrayRef<OperandBundleDef> Bundles,
1704 const Twine &NameStr,
1705 BasicBlock::iterator InsertBefore) {
1706 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, Bundles,
1707 NameStr, InsertBefore);
1708 }
1709
1710 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1711 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
1712 const Twine &NameStr = "",
1713 Instruction *InsertBefore = nullptr) {
1714 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, Bundles,
1715 NameStr, InsertBefore);
1716 }
1717
1718 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1719 const Twine &NameStr,
1720 BasicBlock::iterator InsertBefore) {
1721 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, NameStr,
1722 InsertBefore);
1723 }
1724
1725 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1726 const Twine &NameStr,
1727 Instruction *InsertBefore = nullptr) {
1728 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, NameStr,
1729 InsertBefore);
1730 }
1731
1732 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1733 BasicBlock *InsertAtEnd) {
1734 return Create(Ty: Func.getFunctionType(), F: Func.getCallee(), NameStr,
1735 InsertAtEnd);
1736 }
1737
1738 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1739 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1740 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, NameStr,
1741 InsertAtEnd);
1742 }
1743
1744 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1745 ArrayRef<OperandBundleDef> Bundles,
1746 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1747 return Create(Ty: Func.getFunctionType(), Func: Func.getCallee(), Args, Bundles,
1748 NameStr, InsertAtEnd);
1749 }
1750
1751 /// Create a clone of \p CI with a different set of operand bundles and
1752 /// insert it before \p InsertPt.
1753 ///
1754 /// The returned call instruction is identical to \p CI in every way except that
1755 /// the operand bundles for the new instruction are set to the operand bundles
1756 /// in \p Bundles.
1757 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1758 BasicBlock::iterator InsertPt);
1759 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1760 Instruction *InsertPt = nullptr);
1761
1762 // Note that 'musttail' implies 'tail'.
1763 enum TailCallKind : unsigned {
1764 TCK_None = 0,
1765 TCK_Tail = 1,
1766 TCK_MustTail = 2,
1767 TCK_NoTail = 3,
1768 TCK_LAST = TCK_NoTail
1769 };
1770
1771 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1772 static_assert(
1773 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1774 "Bitfields must be contiguous");
1775
1776 TailCallKind getTailCallKind() const {
1777 return getSubclassData<TailCallKindField>();
1778 }
1779
1780 bool isTailCall() const {
1781 TailCallKind Kind = getTailCallKind();
1782 return Kind == TCK_Tail || Kind == TCK_MustTail;
1783 }
1784
1785 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1786
1787 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1788
1789 void setTailCallKind(TailCallKind TCK) {
1790 setSubclassData<TailCallKindField>(TCK);
1791 }
1792
1793 void setTailCall(bool IsTc = true) {
1794 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1795 }
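
// Illustrative only: marking an existing CallInst *CI as 'musttail', which
// also reports as a plain tail call:
//   CI->setTailCallKind(CallInst::TCK_MustTail);
//   assert(CI->isTailCall() && CI->isMustTailCall());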
1796
1797 /// Return true if the call can return twice
1798 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1799 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1800
1801 // Methods for support type inquiry through isa, cast, and dyn_cast:
1802 static bool classof(const Instruction *I) {
1803 return I->getOpcode() == Instruction::Call;
1804 }
1805 static bool classof(const Value *V) {
1806 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1807 }
1808
1809 /// Updates profile metadata by scaling it by \p S / \p T.
1810 void updateProfWeight(uint64_t S, uint64_t T);
1811
1812private:
1813 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1814 // method so that subclasses cannot accidentally use it.
1815 template <typename Bitfield>
1816 void setSubclassData(typename Bitfield::Type Value) {
1817 Instruction::setSubclassData<Bitfield>(Value);
1818 }
1819};
1820
1821CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1822 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1823 BasicBlock *InsertAtEnd)
1824 : CallBase(Ty->getReturnType(), Instruction::Call,
1825 OperandTraits<CallBase>::op_end(U: this) -
1826 (Args.size() + CountBundleInputs(Bundles) + 1),
1827 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1828 InsertAtEnd) {
1829 init(FTy: Ty, Func, Args, Bundles, NameStr);
1830}
1831
1832CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1833 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1834 BasicBlock::iterator InsertBefore)
1835 : CallBase(Ty->getReturnType(), Instruction::Call,
1836 OperandTraits<CallBase>::op_end(U: this) -
1837 (Args.size() + CountBundleInputs(Bundles) + 1),
1838 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1839 InsertBefore) {
1840 init(FTy: Ty, Func, Args, Bundles, NameStr);
1841}
1842
1843CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1844 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1845 Instruction *InsertBefore)
1846 : CallBase(Ty->getReturnType(), Instruction::Call,
1847 OperandTraits<CallBase>::op_end(U: this) -
1848 (Args.size() + CountBundleInputs(Bundles) + 1),
1849 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1850 InsertBefore) {
1851 init(FTy: Ty, Func, Args, Bundles, NameStr);
1852}
1853
1854//===----------------------------------------------------------------------===//
1855// SelectInst Class
1856//===----------------------------------------------------------------------===//
1857
1858/// This class represents the LLVM 'select' instruction.
1859///
1860class SelectInst : public Instruction {
1861 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1862 BasicBlock::iterator InsertBefore)
1863 : Instruction(S1->getType(), Instruction::Select, &Op<0>(), 3,
1864 InsertBefore) {
1865 init(C, S1, S2);
1866 setName(NameStr);
1867 }
1868
1869 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1870 Instruction *InsertBefore)
1871 : Instruction(S1->getType(), Instruction::Select,
1872 &Op<0>(), 3, InsertBefore) {
1873 init(C, S1, S2);
1874 setName(NameStr);
1875 }
1876
1877 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1878 BasicBlock *InsertAtEnd)
1879 : Instruction(S1->getType(), Instruction::Select,
1880 &Op<0>(), 3, InsertAtEnd) {
1881 init(C, S1, S2);
1882 setName(NameStr);
1883 }
1884
1885 void init(Value *C, Value *S1, Value *S2) {
1886 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1887 Op<0>() = C;
1888 Op<1>() = S1;
1889 Op<2>() = S2;
1890 }
1891
1892protected:
1893 // Note: Instruction needs to be a friend here to call cloneImpl.
1894 friend class Instruction;
1895
1896 SelectInst *cloneImpl() const;
1897
1898public:
1899 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1900 const Twine &NameStr,
1901 BasicBlock::iterator InsertBefore,
1902 Instruction *MDFrom = nullptr) {
1903 SelectInst *Sel = new (3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1904 if (MDFrom)
1905 Sel->copyMetadata(SrcInst: *MDFrom);
1906 return Sel;
1907 }
1908
1909 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1910 const Twine &NameStr = "",
1911 Instruction *InsertBefore = nullptr,
1912 Instruction *MDFrom = nullptr) {
1913 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1914 if (MDFrom)
1915 Sel->copyMetadata(SrcInst: *MDFrom);
1916 return Sel;
1917 }
1918
1919 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1920 const Twine &NameStr,
1921 BasicBlock *InsertAtEnd) {
1922 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1923 }
1924
1925 const Value *getCondition() const { return Op<0>(); }
1926 const Value *getTrueValue() const { return Op<1>(); }
1927 const Value *getFalseValue() const { return Op<2>(); }
1928 Value *getCondition() { return Op<0>(); }
1929 Value *getTrueValue() { return Op<1>(); }
1930 Value *getFalseValue() { return Op<2>(); }
1931
1932 void setCondition(Value *V) { Op<0>() = V; }
1933 void setTrueValue(Value *V) { Op<1>() = V; }
1934 void setFalseValue(Value *V) { Op<2>() = V; }
1935
1936 /// Swap the true and false values of the select instruction.
1937 /// This doesn't swap prof metadata.
1938 void swapValues() { Op<1>().swap(RHS&: Op<2>()); }
1939
1940 /// Return a string if the specified operands are invalid
1941 /// for a select operation, otherwise return null.
1942 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1943
1944 /// Transparently provide more efficient getOperand methods.
1945 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1946
1947 OtherOps getOpcode() const {
1948 return static_cast<OtherOps>(Instruction::getOpcode());
1949 }
1950
1951 // Methods for support type inquiry through isa, cast, and dyn_cast:
1952 static bool classof(const Instruction *I) {
1953 return I->getOpcode() == Instruction::Select;
1954 }
1955 static bool classof(const Value *V) {
1956 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
1957 }
1958};
1959
1960template <>
1961struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1962};
1963
1964DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1965
1966//===----------------------------------------------------------------------===//
1967// VAArgInst Class
1968//===----------------------------------------------------------------------===//
1969
1970 /// This class represents the va_arg LLVM instruction, which returns
1971 /// an argument of the specified type given a va_list and increments that list.
1972///
1973class VAArgInst : public UnaryInstruction {
1974protected:
1975 // Note: Instruction needs to be a friend here to call cloneImpl.
1976 friend class Instruction;
1977
1978 VAArgInst *cloneImpl() const;
1979
1980public:
1981 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1982 BasicBlock::iterator InsertBefore)
1983 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1984 setName(NameStr);
1985 }
1986
1987 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1988 Instruction *InsertBefore = nullptr)
1989 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1990 setName(NameStr);
1991 }
1992
1993 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1994 BasicBlock *InsertAtEnd)
1995 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1996 setName(NameStr);
1997 }
1998
1999 Value *getPointerOperand() { return getOperand(i_nocapture: 0); }
2000 const Value *getPointerOperand() const { return getOperand(i_nocapture: 0); }
2001 static unsigned getPointerOperandIndex() { return 0U; }
2002
2003 // Methods for support type inquiry through isa, cast, and dyn_cast:
2004 static bool classof(const Instruction *I) {
2005 return I->getOpcode() == VAArg;
2006 }
2007 static bool classof(const Value *V) {
2008 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2009 }
2010};
2011
2012//===----------------------------------------------------------------------===//
2013// ExtractElementInst Class
2014//===----------------------------------------------------------------------===//
2015
2016/// This instruction extracts a single (scalar)
2017/// element from a VectorType value
2018///
2019class ExtractElementInst : public Instruction {
2020 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2021 BasicBlock::iterator InsertBefore);
2022 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
2023 Instruction *InsertBefore = nullptr);
2024 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
2025 BasicBlock *InsertAtEnd);
2026
2027protected:
2028 // Note: Instruction needs to be a friend here to call cloneImpl.
2029 friend class Instruction;
2030
2031 ExtractElementInst *cloneImpl() const;
2032
2033public:
2034 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2035 const Twine &NameStr,
2036 BasicBlock::iterator InsertBefore) {
2037 return new (2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2038 }
2039
2040 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2041 const Twine &NameStr = "",
2042 Instruction *InsertBefore = nullptr) {
2043 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
2044 }
2045
2046 static ExtractElementInst *Create(Value *Vec, Value *Idx,
2047 const Twine &NameStr,
2048 BasicBlock *InsertAtEnd) {
2049 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
2050 }
2051
2052 /// Return true if an extractelement instruction can be
2053 /// formed with the specified operands.
2054 static bool isValidOperands(const Value *Vec, const Value *Idx);
2055
2056 Value *getVectorOperand() { return Op<0>(); }
2057 Value *getIndexOperand() { return Op<1>(); }
2058 const Value *getVectorOperand() const { return Op<0>(); }
2059 const Value *getIndexOperand() const { return Op<1>(); }
2060
2061 VectorType *getVectorOperandType() const {
2062 return cast<VectorType>(Val: getVectorOperand()->getType());
2063 }
2064
2065 /// Transparently provide more efficient getOperand methods.
2066 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2067
2068 // Methods for support type inquiry through isa, cast, and dyn_cast:
2069 static bool classof(const Instruction *I) {
2070 return I->getOpcode() == Instruction::ExtractElement;
2071 }
2072 static bool classof(const Value *V) {
2073 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2074 }
2075};
2076
2077template <>
2078struct OperandTraits<ExtractElementInst> :
2079 public FixedNumOperandTraits<ExtractElementInst, 2> {
2080};
2081
2082DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
2083
2084//===----------------------------------------------------------------------===//
2085// InsertElementInst Class
2086//===----------------------------------------------------------------------===//
2087
2088/// This instruction inserts a single (scalar)
2089/// element into a VectorType value
2090///
2091class InsertElementInst : public Instruction {
2092 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2093 BasicBlock::iterator InsertBefore);
2094 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
2095 const Twine &NameStr = "",
2096 Instruction *InsertBefore = nullptr);
2097 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
2098 BasicBlock *InsertAtEnd);
2099
2100protected:
2101 // Note: Instruction needs to be a friend here to call cloneImpl.
2102 friend class Instruction;
2103
2104 InsertElementInst *cloneImpl() const;
2105
2106public:
2107 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2108 const Twine &NameStr,
2109 BasicBlock::iterator InsertBefore) {
2110 return new (3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2111 }
2112
2113 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2114 const Twine &NameStr = "",
2115 Instruction *InsertBefore = nullptr) {
2116 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
2117 }
2118
2119 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
2120 const Twine &NameStr,
2121 BasicBlock *InsertAtEnd) {
2122 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
2123 }
2124
2125 /// Return true if an insertelement instruction can be
2126 /// formed with the specified operands.
2127 static bool isValidOperands(const Value *Vec, const Value *NewElt,
2128 const Value *Idx);
2129
2130 /// Overload to return most specific vector type.
2131 ///
2132 VectorType *getType() const {
2133 return cast<VectorType>(Val: Instruction::getType());
2134 }
2135
2136 /// Transparently provide more efficient getOperand methods.
2137 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2138
2139 // Methods for support type inquiry through isa, cast, and dyn_cast:
2140 static bool classof(const Instruction *I) {
2141 return I->getOpcode() == Instruction::InsertElement;
2142 }
2143 static bool classof(const Value *V) {
2144 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2145 }
2146};
2147
2148template <>
2149struct OperandTraits<InsertElementInst> :
2150 public FixedNumOperandTraits<InsertElementInst, 3> {
2151};
2152
2153DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
2154
2155//===----------------------------------------------------------------------===//
2156// ShuffleVectorInst Class
2157//===----------------------------------------------------------------------===//
2158
2159constexpr int PoisonMaskElem = -1;
2160
2161/// This instruction constructs a fixed permutation of two
2162/// input vectors.
2163///
2164/// For each element of the result vector, the shuffle mask selects an element
2165/// from one of the input vectors to copy to the result. Non-negative elements
2166/// in the mask represent an index into the concatenated pair of input vectors.
2167/// PoisonMaskElem (-1) specifies that the result element is poison.
2168///
2169/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2170/// requirement may be relaxed in the future.
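///
/// For example, interleaving two hypothetical <2 x i32> values:
/// \code
///   %z = shufflevector <2 x i32> %a, <2 x i32> %b,
///                      <4 x i32> <i32 0, i32 2, i32 1, i32 3>
/// \endcode
/// yields <a0, b0, a1, b1>; mask element 2 refers to b0 because the two inputs
/// are indexed as one concatenated 4-element vector.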
2171class ShuffleVectorInst : public Instruction {
2172 SmallVector<int, 4> ShuffleMask;
2173 Constant *ShuffleMaskForBitcode;
2174
2175protected:
2176 // Note: Instruction needs to be a friend here to call cloneImpl.
2177 friend class Instruction;
2178
2179 ShuffleVectorInst *cloneImpl() const;
2180
2181public:
2182 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2183 BasicBlock::iterator InsertBefore);
2184 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2185 Instruction *InsertBefore = nullptr);
2186 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2187 BasicBlock *InsertAtEnd);
2188 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2189 BasicBlock::iterator InsertBefore);
2190 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2191 Instruction *InsertBefore = nullptr);
2192 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2193 BasicBlock *InsertAtEnd);
2194 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, const Twine &NameStr,
2195 BasicBlock::iterator InsertBefore);
2196 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2197 const Twine &NameStr = "",
2198 Instruction *InsertBefore = nullptr);
2199 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2200 const Twine &NameStr, BasicBlock *InsertAtEnd);
2201 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2202 const Twine &NameStr, BasicBlock::iterator InsertBefore);
2203 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2204 const Twine &NameStr = "",
2205 Instruction *InsertBefore = nullptr);
2206 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2207 const Twine &NameStr, BasicBlock *InsertAtEnd);
2208
2209 void *operator new(size_t S) { return User::operator new(Size: S, Us: 2); }
2210 void operator delete(void *Ptr) { return User::operator delete(Usr: Ptr); }
2211
2212 /// Swap the operands and adjust the mask to preserve the semantics
2213 /// of the instruction.
2214 void commute();
2215
2216 /// Return true if a shufflevector instruction can be
2217 /// formed with the specified operands.
2218 static bool isValidOperands(const Value *V1, const Value *V2,
2219 const Value *Mask);
2220 static bool isValidOperands(const Value *V1, const Value *V2,
2221 ArrayRef<int> Mask);
2222
2223 /// Overload to return most specific vector type.
2224 ///
2225 VectorType *getType() const {
2226 return cast<VectorType>(Val: Instruction::getType());
2227 }
2228
2229 /// Transparently provide more efficient getOperand methods.
2230 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2231
2232 /// Return the shuffle mask value of this instruction for the given element
2233 /// index. Return PoisonMaskElem if the element is undef.
2234 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2235
2236 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2237 /// elements of the mask are returned as PoisonMaskElem.
2238 static void getShuffleMask(const Constant *Mask,
2239 SmallVectorImpl<int> &Result);
2240
2241 /// Return the mask for this instruction as a vector of integers. Undefined
2242 /// elements of the mask are returned as PoisonMaskElem.
2243 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2244 Result.assign(in_start: ShuffleMask.begin(), in_end: ShuffleMask.end());
2245 }
2246
2247 /// Return the mask for this instruction, for use in bitcode.
2248 ///
2249 /// TODO: This is temporary until we decide a new bitcode encoding for
2250 /// shufflevector.
2251 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2252
2253 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2254 Type *ResultTy);
2255
2256 void setShuffleMask(ArrayRef<int> Mask);
2257
2258 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2259
2260 /// Return true if this shuffle returns a vector with a different number of
2261 /// elements than its source vectors.
2262 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2263 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2264 bool changesLength() const {
2265 unsigned NumSourceElts = cast<VectorType>(Val: Op<0>()->getType())
2266 ->getElementCount()
2267 .getKnownMinValue();
2268 unsigned NumMaskElts = ShuffleMask.size();
2269 return NumSourceElts != NumMaskElts;
2270 }
2271
2272 /// Return true if this shuffle returns a vector with a greater number of
2273 /// elements than its source vectors.
2274 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2275 bool increasesLength() const {
2276 unsigned NumSourceElts = cast<VectorType>(Val: Op<0>()->getType())
2277 ->getElementCount()
2278 .getKnownMinValue();
2279 unsigned NumMaskElts = ShuffleMask.size();
2280 return NumSourceElts < NumMaskElts;
2281 }
2282
2283 /// Return true if this shuffle mask chooses elements from exactly one source
2284 /// vector.
2285 /// Example: <7,5,undef,7>
2286 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2287 /// length as the mask.
2288 static bool isSingleSourceMask(ArrayRef<int> Mask, int NumSrcElts);
2289 static bool isSingleSourceMask(const Constant *Mask, int NumSrcElts) {
2290 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2291 SmallVector<int, 16> MaskAsInts;
2292 getShuffleMask(Mask, Result&: MaskAsInts);
2293 return isSingleSourceMask(Mask: MaskAsInts, NumSrcElts);
2294 }
2295
2296 /// Return true if this shuffle chooses elements from exactly one source
2297 /// vector without changing the length of that vector.
2298 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2299 /// TODO: Optionally allow length-changing shuffles.
2300 bool isSingleSource() const {
2301 return !changesLength() &&
2302 isSingleSourceMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2303 }
2304
2305 /// Return true if this shuffle mask chooses elements from exactly one source
2306 /// vector without lane crossings. A shuffle using this mask is not
2307 /// necessarily a no-op because it may change the number of elements from its
2308 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2309 /// Example: <undef,undef,2,3>
2310 static bool isIdentityMask(ArrayRef<int> Mask, int NumSrcElts);
2311 static bool isIdentityMask(const Constant *Mask, int NumSrcElts) {
2312 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2313
2314 // Not possible to express a shuffle mask for a scalable vector for this
2315 // case.
2316 if (isa<ScalableVectorType>(Val: Mask->getType()))
2317 return false;
2318
2319 SmallVector<int, 16> MaskAsInts;
2320 getShuffleMask(Mask, Result&: MaskAsInts);
2321 return isIdentityMask(Mask: MaskAsInts, NumSrcElts);
2322 }
2323
2324 /// Return true if this shuffle chooses elements from exactly one source
2325 /// vector without lane crossings and does not change the number of elements
2326 /// from its input vectors.
2327 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2328 bool isIdentity() const {
2329 // Not possible to express a shuffle mask for a scalable vector for this
2330 // case.
2331 if (isa<ScalableVectorType>(Val: getType()))
2332 return false;
2333
2334 return !changesLength() && isIdentityMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2335 }
2336
2337 /// Return true if this shuffle lengthens exactly one source vector with
2338 /// undefs in the high elements.
2339 bool isIdentityWithPadding() const;
2340
2341 /// Return true if this shuffle extracts the first N elements of exactly one
2342 /// source vector.
2343 bool isIdentityWithExtract() const;
2344
2345 /// Return true if this shuffle concatenates its 2 source vectors. This
2346 /// returns false if either input is undefined. In that case, the shuffle is
2347 /// better classified as an identity with padding operation.
2348 bool isConcat() const;
2349
2350 /// Return true if this shuffle mask chooses elements from its source vectors
2351 /// without lane crossings. A shuffle using this mask would be
2352 /// equivalent to a vector select with a constant condition operand.
2353 /// Example: <4,1,6,undef>
2354 /// This returns false if the mask does not choose from both input vectors.
2355 /// In that case, the shuffle is better classified as an identity shuffle.
2356 /// This assumes that vector operands are the same length as the mask
2357 /// (a length-changing shuffle can never be equivalent to a vector select).
2358 static bool isSelectMask(ArrayRef<int> Mask, int NumSrcElts);
2359 static bool isSelectMask(const Constant *Mask, int NumSrcElts) {
2360 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2361 SmallVector<int, 16> MaskAsInts;
2362 getShuffleMask(Mask, Result&: MaskAsInts);
2363 return isSelectMask(Mask: MaskAsInts, NumSrcElts);
2364 }
2365
2366 /// Return true if this shuffle chooses elements from its source vectors
2367 /// without lane crossings and all operands have the same number of elements.
2368 /// In other words, this shuffle is equivalent to a vector select with a
2369 /// constant condition operand.
2370 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2371 /// This returns false if the mask does not choose from both input vectors.
2372 /// In that case, the shuffle is better classified as an identity shuffle.
2373 /// TODO: Optionally allow length-changing shuffles.
2374 bool isSelect() const {
2375 return !changesLength() && isSelectMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2376 }
2377
2378 /// Return true if this shuffle mask swaps the order of elements from exactly
2379 /// one source vector.
2380 /// Example: <7,6,undef,4>
2381 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2382 /// length as the mask.
2383 static bool isReverseMask(ArrayRef<int> Mask, int NumSrcElts);
2384 static bool isReverseMask(const Constant *Mask, int NumSrcElts) {
2385 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2386 SmallVector<int, 16> MaskAsInts;
2387 getShuffleMask(Mask, Result&: MaskAsInts);
2388 return isReverseMask(Mask: MaskAsInts, NumSrcElts);
2389 }
2390
2391 /// Return true if this shuffle swaps the order of elements from exactly
2392 /// one source vector.
2393 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2394 /// TODO: Optionally allow length-changing shuffles.
2395 bool isReverse() const {
2396 return !changesLength() && isReverseMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2397 }
2398
2399 /// Return true if this shuffle mask chooses all elements with the same value
2400 /// as the first element of exactly one source vector.
2401 /// Example: <4,undef,undef,4>
2402 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2403 /// length as the mask.
2404 static bool isZeroEltSplatMask(ArrayRef<int> Mask, int NumSrcElts);
2405 static bool isZeroEltSplatMask(const Constant *Mask, int NumSrcElts) {
2406 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2407 SmallVector<int, 16> MaskAsInts;
2408 getShuffleMask(Mask, Result&: MaskAsInts);
2409 return isZeroEltSplatMask(Mask: MaskAsInts, NumSrcElts);
2410 }
2411
2412 /// Return true if all elements of this shuffle are the same value as the
2413 /// first element of exactly one source vector without changing the length
2414 /// of that vector.
2415 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2416 /// TODO: Optionally allow length-changing shuffles.
2417 /// TODO: Optionally allow splats from other elements.
2418 bool isZeroEltSplat() const {
2419 return !changesLength() &&
2420 isZeroEltSplatMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2421 }
2422
2423 /// Return true if this shuffle mask is a transpose mask.
2424 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2425 /// even- or odd-numbered vector elements from two n-dimensional source
2426 /// vectors and write each result into consecutive elements of an
2427 /// n-dimensional destination vector. Two shuffles are necessary to complete
2428 /// the transpose, one for the even elements and another for the odd elements.
2429 /// This description closely follows how the TRN1 and TRN2 AArch64
2430 /// instructions operate.
2431 ///
2432 /// For example, a simple 2x2 matrix can be transposed with:
2433 ///
2434 /// ; Original matrix
2435 /// m0 = < a, b >
2436 /// m1 = < c, d >
2437 ///
2438 /// ; Transposed matrix
2439 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2440 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2441 ///
2442 /// For matrices having greater than n columns, the resulting nx2 transposed
2443 /// matrix is stored in two result vectors such that one vector contains
2444 /// interleaved elements from all the even-numbered rows and the other vector
2445 /// contains interleaved elements from all the odd-numbered rows. For example,
2446 /// a 2x4 matrix can be transposed with:
2447 ///
2448 /// ; Original matrix
2449 /// m0 = < a, b, c, d >
2450 /// m1 = < e, f, g, h >
2451 ///
2452 /// ; Transposed matrix
2453 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2454 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2455 static bool isTransposeMask(ArrayRef<int> Mask, int NumSrcElts);
2456 static bool isTransposeMask(const Constant *Mask, int NumSrcElts) {
2457 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2458 SmallVector<int, 16> MaskAsInts;
2459 getShuffleMask(Mask, Result&: MaskAsInts);
2460 return isTransposeMask(Mask: MaskAsInts, NumSrcElts);
2461 }
2462
2463 /// Return true if this shuffle transposes the elements of its inputs without
2464 /// changing the length of the vectors. This operation may also be known as a
2465 /// merge or interleave. See the description for isTransposeMask() for the
2466 /// exact specification.
2467 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2468 bool isTranspose() const {
2469 return !changesLength() && isTransposeMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size());
2470 }
2471
2472 /// Return true if this shuffle mask is a splice mask, concatenating the two
2473 /// inputs together and then extracting an original width vector starting from
2474 /// the splice index.
2475 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2476 /// This assumes that vector operands (of length \p NumSrcElts) are the same
2477 /// length as the mask.
2478 static bool isSpliceMask(ArrayRef<int> Mask, int NumSrcElts, int &Index);
2479 static bool isSpliceMask(const Constant *Mask, int NumSrcElts, int &Index) {
2480 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2481 SmallVector<int, 16> MaskAsInts;
2482 getShuffleMask(Mask, Result&: MaskAsInts);
2483 return isSpliceMask(Mask: MaskAsInts, NumSrcElts, Index);
2484 }
2485
2486 /// Return true if this shuffle splices two inputs without changing the length
2487 /// of the vectors. This operation concatenates the two inputs together and
2488 /// then extracts an original width vector starting from the splice index.
2489 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2490 bool isSplice(int &Index) const {
2491 return !changesLength() &&
2492 isSpliceMask(Mask: ShuffleMask, NumSrcElts: ShuffleMask.size(), Index);
2493 }
2494
2495 /// Return true if this shuffle mask is an extract subvector mask.
2496 /// A valid extract subvector mask returns a smaller vector from a single
2497 /// source operand. The base extraction index is returned as well.
2498 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2499 int &Index);
2500 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2501 int &Index) {
2502 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2503 // Not possible to express a shuffle mask for a scalable vector for this
2504 // case.
2505 if (isa<ScalableVectorType>(Val: Mask->getType()))
2506 return false;
2507 SmallVector<int, 16> MaskAsInts;
2508 getShuffleMask(Mask, Result&: MaskAsInts);
2509 return isExtractSubvectorMask(Mask: MaskAsInts, NumSrcElts, Index);
2510 }
2511
2512 /// Return true if this shuffle mask is an extract subvector mask.
2513 bool isExtractSubvectorMask(int &Index) const {
2514 // Not possible to express a shuffle mask for a scalable vector for this
2515 // case.
2516 if (isa<ScalableVectorType>(Val: getType()))
2517 return false;
2518
2519 int NumSrcElts =
2520 cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2521 return isExtractSubvectorMask(Mask: ShuffleMask, NumSrcElts, Index);
2522 }
2523
2524 /// Return true if this shuffle mask is an insert subvector mask.
2525 /// A valid insert subvector mask inserts the lowest elements of a second
2526 /// source operand into an in-place first source operand.
2527 /// Both the subvector width and the insertion index are returned.
2528 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2529 int &NumSubElts, int &Index);
2530 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2531 int &NumSubElts, int &Index) {
2532 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2533 // Not possible to express a shuffle mask for a scalable vector for this
2534 // case.
2535 if (isa<ScalableVectorType>(Val: Mask->getType()))
2536 return false;
2537 SmallVector<int, 16> MaskAsInts;
2538 getShuffleMask(Mask, Result&: MaskAsInts);
2539 return isInsertSubvectorMask(Mask: MaskAsInts, NumSrcElts, NumSubElts, Index);
2540 }
2541
2542 /// Return true if this shuffle mask is an insert subvector mask.
2543 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2544 // Not possible to express a shuffle mask for a scalable vector for this
2545 // case.
2546 if (isa<ScalableVectorType>(Val: getType()))
2547 return false;
2548
2549 int NumSrcElts =
2550 cast<FixedVectorType>(Val: Op<0>()->getType())->getNumElements();
2551 return isInsertSubvectorMask(Mask: ShuffleMask, NumSrcElts, NumSubElts, Index);
2552 }
2553
2554 /// Return true if this shuffle mask replicates each of the \p VF elements
2555 /// in a vector \p ReplicationFactor times.
2556 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2557 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2558 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2559 int &VF);
2560 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2561 int &VF) {
2562 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2563 // Not possible to express a shuffle mask for a scalable vector for this
2564 // case.
2565 if (isa<ScalableVectorType>(Val: Mask->getType()))
2566 return false;
2567 SmallVector<int, 16> MaskAsInts;
2568 getShuffleMask(Mask, Result&: MaskAsInts);
2569 return isReplicationMask(Mask: MaskAsInts, ReplicationFactor, VF);
2570 }
2571
2572 /// Return true if this shuffle mask is a replication mask.
2573 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2574
2575 /// Return true if this shuffle mask represents a "clustered" mask of size VF,
2576 /// i.e. each index between [0..VF) is used exactly once in each submask of
2577 /// size VF.
2578 /// For example, the mask for \p VF=4 is:
2579 /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4
2580 /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time.
2581 /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because
2582 /// element 3 is used twice in the second submask
2583 /// (3,3,1,0) and index 2 is not used at all.
2584 static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF);
2585
2586 /// Return true if this shuffle mask is a one-use-single-source("clustered")
2587 /// mask.
2588 bool isOneUseSingleSourceMask(int VF) const;
2589
2590 /// Change values in a shuffle permute mask assuming the two vector operands
2591 /// of length InVecNumElts have swapped position.
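/// A small worked example (InVecNumElts = 4):
/// \code
///   SmallVector<int, 4> Mask = {0, 5, 2, 7};
///   ShuffleVectorInst::commuteShuffleMask(Mask, 4); // Mask is now {4, 1, 6, 3}
/// \endcode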
2592 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2593 unsigned InVecNumElts) {
2594 for (int &Idx : Mask) {
2595 if (Idx == -1)
2596 continue;
2597 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2598 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2599 "shufflevector mask index out of range");
2600 }
2601 }
2602
2603 /// Return true if this shuffle interleaves its two input vectors together.
2604 bool isInterleave(unsigned Factor);
2605
2606 /// Return true if the mask interleaves one or more input vectors together.
2607 ///
2608 /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
2609 /// E.g. For a Factor of 2 (LaneLen=4):
2610 /// <0, 4, 1, 5, 2, 6, 3, 7>
2611 /// E.g. For a Factor of 3 (LaneLen=4):
2612 /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12>
2613 /// E.g. For a Factor of 4 (LaneLen=2):
2614 /// <0, 2, 6, 4, 1, 3, 7, 5>
2615 ///
2616 /// NumInputElts is the total number of elements in the input vectors.
2617 ///
2618 /// StartIndexes are the first indexes of each vector being interleaved,
2619 /// substituting any indexes that were undef
2620 /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2>
2621 ///
2622 /// Note that this does not check if the input vectors are consecutive:
2623 /// It will return true for masks such as
2624 /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2)
2625 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2626 unsigned NumInputElts,
2627 SmallVectorImpl<unsigned> &StartIndexes);
2628 static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor,
2629 unsigned NumInputElts) {
2630 SmallVector<unsigned, 8> StartIndexes;
2631 return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes);
2632 }
2633
2634 /// Check if the mask is a DE-interleave mask of the given factor
2635 /// \p Factor like:
2636 /// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2637 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor,
2638 unsigned &Index);
2639 static bool isDeInterleaveMaskOfFactor(ArrayRef<int> Mask, unsigned Factor) {
2640 unsigned Unused;
2641 return isDeInterleaveMaskOfFactor(Mask, Factor, Index&: Unused);
2642 }
2643
2644 /// Checks if the shuffle is a bit rotation of the first operand across
2645 /// multiple subelements, e.g:
2646 ///
2647 /// shuffle <8 x i8> %a, <8 x i8> poison, <8 x i32> <1, 0, 3, 2, 5, 4, 7, 6>
2648 ///
2649 /// could be expressed as
2650 ///
2651 /// rotl <4 x i16> %a, 8
2652 ///
2653 /// If it can be expressed as a rotation, returns the number of subelements to
2654 /// group by in NumSubElts and the number of bits to rotate left in RotateAmt.
2655 static bool isBitRotateMask(ArrayRef<int> Mask, unsigned EltSizeInBits,
2656 unsigned MinSubElts, unsigned MaxSubElts,
2657 unsigned &NumSubElts, unsigned &RotateAmt);
2658
2659 // Methods for support type inquiry through isa, cast, and dyn_cast:
2660 static bool classof(const Instruction *I) {
2661 return I->getOpcode() == Instruction::ShuffleVector;
2662 }
2663 static bool classof(const Value *V) {
2664 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2665 }
2666};
2667
2668template <>
2669struct OperandTraits<ShuffleVectorInst>
2670 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2671
2672DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2673
2674//===----------------------------------------------------------------------===//
2675// ExtractValueInst Class
2676//===----------------------------------------------------------------------===//
2677
2678/// This instruction extracts a struct member or array
2679/// element value from an aggregate value.
2680///
2681class ExtractValueInst : public UnaryInstruction {
2682 SmallVector<unsigned, 4> Indices;
2683
2684 ExtractValueInst(const ExtractValueInst &EVI);
2685
2686 /// Constructors - Create an extractvalue instruction with a base aggregate
2687 /// value and a list of indices. The first and second ctor can optionally
2688 /// insert before an existing instruction, the third appends the new
2689 /// instruction to the specified BasicBlock.
2690 inline ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2691 const Twine &NameStr,
2692 BasicBlock::iterator InsertBefore);
2693 inline ExtractValueInst(Value *Agg,
2694 ArrayRef<unsigned> Idxs,
2695 const Twine &NameStr,
2696 Instruction *InsertBefore);
2697 inline ExtractValueInst(Value *Agg,
2698 ArrayRef<unsigned> Idxs,
2699 const Twine &NameStr, BasicBlock *InsertAtEnd);
2700
2701 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2702
2703protected:
2704 // Note: Instruction needs to be a friend here to call cloneImpl.
2705 friend class Instruction;
2706
2707 ExtractValueInst *cloneImpl() const;
2708
2709public:
2710 static ExtractValueInst *Create(Value *Agg, ArrayRef<unsigned> Idxs,
2711 const Twine &NameStr,
2712 BasicBlock::iterator InsertBefore) {
2713 return new
2714 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2715 }
2716
2717 static ExtractValueInst *Create(Value *Agg,
2718 ArrayRef<unsigned> Idxs,
2719 const Twine &NameStr = "",
2720 Instruction *InsertBefore = nullptr) {
2721 return new
2722 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2723 }
2724
2725 static ExtractValueInst *Create(Value *Agg,
2726 ArrayRef<unsigned> Idxs,
2727 const Twine &NameStr,
2728 BasicBlock *InsertAtEnd) {
2729 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2730 }
2731
2732 /// Returns the type of the element that would be extracted
2733 /// with an extractvalue instruction with the specified parameters.
2734 ///
2735 /// Null is returned if the indices are invalid for the specified type.
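///
/// For the struct type { i32, { float, i8 } } (call it \c STy, a hypothetical
/// Type* used only for illustration):
/// \code
///   ExtractValueInst::getIndexedType(STy, {1, 0}); // returns the float type
///   ExtractValueInst::getIndexedType(STy, {2});    // returns nullptr
/// \endcode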
2736 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2737
2738 using idx_iterator = const unsigned*;
2739
2740 inline idx_iterator idx_begin() const { return Indices.begin(); }
2741 inline idx_iterator idx_end() const { return Indices.end(); }
2742 inline iterator_range<idx_iterator> indices() const {
2743 return make_range(x: idx_begin(), y: idx_end());
2744 }
2745
2746 Value *getAggregateOperand() {
2747 return getOperand(i_nocapture: 0);
2748 }
2749 const Value *getAggregateOperand() const {
2750 return getOperand(i_nocapture: 0);
2751 }
2752 static unsigned getAggregateOperandIndex() {
2753 return 0U; // get index for modifying correct operand
2754 }
2755
2756 ArrayRef<unsigned> getIndices() const {
2757 return Indices;
2758 }
2759
2760 unsigned getNumIndices() const {
2761 return (unsigned)Indices.size();
2762 }
2763
2764 bool hasIndices() const {
2765 return true;
2766 }
2767
2768 // Methods for support type inquiry through isa, cast, and dyn_cast:
2769 static bool classof(const Instruction *I) {
2770 return I->getOpcode() == Instruction::ExtractValue;
2771 }
2772 static bool classof(const Value *V) {
2773 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
2774 }
2775};
2776
2777ExtractValueInst::ExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
2778 const Twine &NameStr,
2779 BasicBlock::iterator InsertBefore)
2780 : UnaryInstruction(checkGEPType(Ty: getIndexedType(Agg: Agg->getType(), Idxs)),
2781 ExtractValue, Agg, InsertBefore) {
2782 init(Idxs, NameStr);
2783}
2784
2785ExtractValueInst::ExtractValueInst(Value *Agg,
2786 ArrayRef<unsigned> Idxs,
2787 const Twine &NameStr,
2788 Instruction *InsertBefore)
2789 : UnaryInstruction(checkGEPType(Ty: getIndexedType(Agg: Agg->getType(), Idxs)),
2790 ExtractValue, Agg, InsertBefore) {
2791 init(Idxs, NameStr);
2792}
2793
2794ExtractValueInst::ExtractValueInst(Value *Agg,
2795 ArrayRef<unsigned> Idxs,
2796 const Twine &NameStr,
2797 BasicBlock *InsertAtEnd)
2798 : UnaryInstruction(checkGEPType(Ty: getIndexedType(Agg: Agg->getType(), Idxs)),
2799 ExtractValue, Agg, InsertAtEnd) {
2800 init(Idxs, NameStr);
2801}
2802
2803//===----------------------------------------------------------------------===//
2804// InsertValueInst Class
2805//===----------------------------------------------------------------------===//
2806
2807 /// This instruction inserts a struct field or array element
2808/// value into an aggregate value.
2809///
2810class InsertValueInst : public Instruction {
2811 SmallVector<unsigned, 4> Indices;
2812
2813 InsertValueInst(const InsertValueInst &IVI);
2814
2815 /// Constructors - Create an insertvalue instruction with a base aggregate
2816 /// value, a value to insert, and a list of indices. The first and second ctor
2817 /// can optionally insert before an existing instruction, the third appends
2818 /// the new instruction to the specified BasicBlock.
2819 inline InsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2820 const Twine &NameStr,
2821 BasicBlock::iterator InsertBefore);
2822 inline InsertValueInst(Value *Agg, Value *Val,
2823 ArrayRef<unsigned> Idxs,
2824 const Twine &NameStr,
2825 Instruction *InsertBefore);
2826 inline InsertValueInst(Value *Agg, Value *Val,
2827 ArrayRef<unsigned> Idxs,
2828 const Twine &NameStr, BasicBlock *InsertAtEnd);
2829
2830 /// Constructors - These three constructors are convenience methods because
2831 /// one and two index insertvalue instructions are so common.
2832 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2833 BasicBlock::iterator InsertBefore);
2834 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2835 const Twine &NameStr = "",
2836 Instruction *InsertBefore = nullptr);
2837 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2838 BasicBlock *InsertAtEnd);
2839
2840 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2841 const Twine &NameStr);
2842
2843protected:
2844 // Note: Instruction needs to be a friend here to call cloneImpl.
2845 friend class Instruction;
2846
2847 InsertValueInst *cloneImpl() const;
2848
2849public:
2850 // allocate space for exactly two operands
2851 void *operator new(size_t S) { return User::operator new(Size: S, Us: 2); }
2852 void operator delete(void *Ptr) { User::operator delete(Usr: Ptr); }
2853
2854 static InsertValueInst *Create(Value *Agg, Value *Val,
2855 ArrayRef<unsigned> Idxs, const Twine &NameStr,
2856 BasicBlock::iterator InsertBefore) {
2857 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2858 }
2859
2860 static InsertValueInst *Create(Value *Agg, Value *Val,
2861 ArrayRef<unsigned> Idxs,
2862 const Twine &NameStr = "",
2863 Instruction *InsertBefore = nullptr) {
2864 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2865 }
2866
2867 static InsertValueInst *Create(Value *Agg, Value *Val,
2868 ArrayRef<unsigned> Idxs,
2869 const Twine &NameStr,
2870 BasicBlock *InsertAtEnd) {
2871 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2872 }
2873
2874 /// Transparently provide more efficient getOperand methods.
2875 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2876
2877 using idx_iterator = const unsigned*;
2878
2879 inline idx_iterator idx_begin() const { return Indices.begin(); }
2880 inline idx_iterator idx_end() const { return Indices.end(); }
2881 inline iterator_range<idx_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
2883 }
2884
2885 Value *getAggregateOperand() {
2886 return getOperand(0);
2887 }
2888 const Value *getAggregateOperand() const {
2889 return getOperand(0);
2890 }
2891 static unsigned getAggregateOperandIndex() {
2892 return 0U; // get index for modifying correct operand
2893 }
2894
2895 Value *getInsertedValueOperand() {
2896 return getOperand(1);
2897 }
2898 const Value *getInsertedValueOperand() const {
2899 return getOperand(1);
2900 }
2901 static unsigned getInsertedValueOperandIndex() {
2902 return 1U; // get index for modifying correct operand
2903 }
2904
2905 ArrayRef<unsigned> getIndices() const {
2906 return Indices;
2907 }
2908
2909 unsigned getNumIndices() const {
2910 return (unsigned)Indices.size();
2911 }
2912
2913 bool hasIndices() const {
2914 return true;
2915 }
2916
2917 // Methods for support type inquiry through isa, cast, and dyn_cast:
2918 static bool classof(const Instruction *I) {
2919 return I->getOpcode() == Instruction::InsertValue;
2920 }
2921 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
2923 }
2924};
2925
2926template <>
2927struct OperandTraits<InsertValueInst> :
2928 public FixedNumOperandTraits<InsertValueInst, 2> {
2929};
2930
2931InsertValueInst::InsertValueInst(Value *Agg,
2932 Value *Val,
2933 ArrayRef<unsigned> Idxs,
2934 const Twine &NameStr,
2935 BasicBlock::iterator InsertBefore)
  : Instruction(Agg->getType(), InsertValue, OperandTraits<InsertValueInst>::op_begin(this),
2937 2, InsertBefore) {
2938 init(Agg, Val, Idxs, NameStr);
2939}
2940
2941InsertValueInst::InsertValueInst(Value *Agg,
2942 Value *Val,
2943 ArrayRef<unsigned> Idxs,
2944 const Twine &NameStr,
2945 Instruction *InsertBefore)
2946 : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
2948 2, InsertBefore) {
2949 init(Agg, Val, Idxs, NameStr);
2950}
2951
2952InsertValueInst::InsertValueInst(Value *Agg,
2953 Value *Val,
2954 ArrayRef<unsigned> Idxs,
2955 const Twine &NameStr,
2956 BasicBlock *InsertAtEnd)
2957 : Instruction(Agg->getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this),
2959 2, InsertAtEnd) {
2960 init(Agg, Val, Idxs, NameStr);
2961}
2962
2963DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2964
2965//===----------------------------------------------------------------------===//
2966// PHINode Class
2967//===----------------------------------------------------------------------===//
2968
2969// PHINode - The PHINode class is used to represent the magical mystical PHI
2970// node, that can not exist in nature, but can be synthesized in a computer
2971// scientist's overactive imagination.
2972//
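// A minimal usage sketch (illustrative only; Ty, BB0, BB1, V0, V1, and InsertPt
// are assumed to exist in the surrounding code):
//
//   PHINode *PN = PHINode::Create(Ty, /*NumReservedValues=*/2, "merge", InsertPt);
//   PN->addIncoming(V0, BB0);
//   PN->addIncoming(V1, BB1);
//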
2973class PHINode : public Instruction {
2974 /// The number of operands actually allocated. NumOperands is
2975 /// the number actually in use.
2976 unsigned ReservedSpace;
2977
2978 PHINode(const PHINode &PN);
2979
2980 explicit PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2981 BasicBlock::iterator InsertBefore)
2982 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2983 ReservedSpace(NumReservedValues) {
2984 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2985 setName(NameStr);
    allocHungoffUses(ReservedSpace);
2987 }
2988
2989 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2990 const Twine &NameStr = "",
2991 Instruction *InsertBefore = nullptr)
2992 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2993 ReservedSpace(NumReservedValues) {
2994 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2995 setName(NameStr);
    allocHungoffUses(ReservedSpace);
2997 }
2998
2999 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
3000 BasicBlock *InsertAtEnd)
3001 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
3002 ReservedSpace(NumReservedValues) {
3003 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
3004 setName(NameStr);
    allocHungoffUses(ReservedSpace);
3006 }
3007
3008protected:
3009 // Note: Instruction needs to be a friend here to call cloneImpl.
3010 friend class Instruction;
3011
3012 PHINode *cloneImpl() const;
3013
3014 // allocHungoffUses - this is more complicated than the generic
3015 // User::allocHungoffUses, because we have to allocate Uses for the incoming
3016 // values and pointers to the incoming blocks, all in one allocation.
3017 void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
3019 }
3020
3021public:
3022 /// Constructors - NumReservedValues is a hint for the number of incoming
3023 /// edges that this phi node will have (use 0 if you really have no idea).
3024 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3025 const Twine &NameStr,
3026 BasicBlock::iterator InsertBefore) {
3027 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
3028 }
3029
3030 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3031 const Twine &NameStr = "",
3032 Instruction *InsertBefore = nullptr) {
3033 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
3034 }
3035
3036 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
3037 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3038 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
3039 }
3040
3041 /// Provide fast operand accessors
3042 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3043
3044 // Block iterator interface. This provides access to the list of incoming
3045 // basic blocks, which parallels the list of incoming values.
  // Please note that we are not providing non-const iterators for blocks to
  // force all updates to go through an interface function.
3048
3049 using block_iterator = BasicBlock **;
3050 using const_block_iterator = BasicBlock * const *;
3051
3052 const_block_iterator block_begin() const {
3053 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
3054 }
3055
3056 const_block_iterator block_end() const {
3057 return block_begin() + getNumOperands();
3058 }
3059
3060 iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
3062 }
3063
3064 op_range incoming_values() { return operands(); }
3065
3066 const_op_range incoming_values() const { return operands(); }
3067
3068 /// Return the number of incoming edges
3069 ///
3070 unsigned getNumIncomingValues() const { return getNumOperands(); }
3071
3072 /// Return incoming value number x
3073 ///
3074 Value *getIncomingValue(unsigned i) const {
3075 return getOperand(i);
3076 }
3077 void setIncomingValue(unsigned i, Value *V) {
3078 assert(V && "PHI node got a null value!");
3079 assert(getType() == V->getType() &&
3080 "All operands to PHI node must be the same type as the PHI node!");
3081 setOperand(i, V);
3082 }
3083
3084 static unsigned getOperandNumForIncomingValue(unsigned i) {
3085 return i;
3086 }
3087
3088 static unsigned getIncomingValueNumForOperand(unsigned i) {
3089 return i;
3090 }
3091
3092 /// Return incoming basic block number @p i.
3093 ///
3094 BasicBlock *getIncomingBlock(unsigned i) const {
3095 return block_begin()[i];
3096 }
3097
3098 /// Return incoming basic block corresponding
3099 /// to an operand of the PHI.
3100 ///
3101 BasicBlock *getIncomingBlock(const Use &U) const {
3102 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
3104 }
3105
3106 /// Return incoming basic block corresponding
3107 /// to value use iterator.
3108 ///
3109 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
3111 }
3112
3113 void setIncomingBlock(unsigned i, BasicBlock *BB) {
3114 const_cast<block_iterator>(block_begin())[i] = BB;
3115 }
3116
3117 /// Copies the basic blocks from \p BBRange to the incoming basic block list
3118 /// of this PHINode, starting at \p ToIdx.
3119 void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange,
3120 uint32_t ToIdx = 0) {
    copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx);
3122 }
3123
  /// Replace every incoming basic block \p Old with basic block \p New.
3125 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
3126 assert(New && Old && "PHI node got a null basic block!");
3127 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == Old)
        setIncomingBlock(Op, New);
3130 }
3131
3132 /// Add an incoming value to the end of the PHI list
3133 ///
3134 void addIncoming(Value *V, BasicBlock *BB) {
3135 if (getNumOperands() == ReservedSpace)
3136 growOperands(); // Get more space!
3137 // Initialize some new operands.
3138 setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
3141 }
3142
3143 /// Remove an incoming value. This is useful if a
3144 /// predecessor basic block is deleted. The value removed is returned.
3145 ///
3146 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
3147 /// is true), the PHI node is destroyed and any uses of it are replaced with
3148 /// dummy values. The only time there should be zero incoming values to a PHI
3149 /// node is when the block is dead, so this strategy is sound.
3150 ///
3151 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
3152
3153 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
3154 int Idx = getBasicBlockIndex(BB);
3155 assert(Idx >= 0 && "Invalid basic block argument to remove!");
3156 return removeIncomingValue(Idx, DeletePHIIfEmpty);
3157 }
3158
3159 /// Remove all incoming values for which the predicate returns true.
3160 /// The predicate accepts the incoming value index.
3161 void removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
3162 bool DeletePHIIfEmpty = true);
3163
3164 /// Return the first index of the specified basic
3165 /// block in the value list for this PHI. Returns -1 if no instance.
3166 ///
3167 int getBasicBlockIndex(const BasicBlock *BB) const {
3168 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
3169 if (block_begin()[i] == BB)
3170 return i;
3171 return -1;
3172 }
3173
3174 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
3175 int Idx = getBasicBlockIndex(BB);
3176 assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
3178 }
3179
  /// Set every incoming value for block \p BB to \p V.
3181 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
3182 assert(BB && "PHI node got a null basic block!");
3183 bool Found = false;
3184 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
      if (getIncomingBlock(Op) == BB) {
        Found = true;
        setIncomingValue(Op, V);
3188 }
3189 (void)Found;
3190 assert(Found && "Invalid basic block argument to set!");
3191 }
3192
3193 /// If the specified PHI node always merges together the
3194 /// same value, return the value, otherwise return null.
3195 Value *hasConstantValue() const;
3196
3197 /// Whether the specified PHI node always merges
3198 /// together the same value, assuming undefs are equal to a unique
3199 /// non-undef value.
3200 bool hasConstantOrUndefValue() const;
3201
  /// Return true if this PHI node is complete, i.e., all of its parent's
  /// predecessors have an incoming value in this PHI; otherwise return false.
  bool isComplete() const {
    return llvm::all_of(predecessors(getParent()),
                        [this](const BasicBlock *Pred) {
                          return getBasicBlockIndex(Pred) >= 0;
3208 });
3209 }
3210
3211 /// Methods for support type inquiry through isa, cast, and dyn_cast:
3212 static bool classof(const Instruction *I) {
3213 return I->getOpcode() == Instruction::PHI;
3214 }
3215 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
3217 }
3218
3219private:
3220 void growOperands();
3221};
3222
3223template <>
3224struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
3225};
3226
3227DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
3228
3229//===----------------------------------------------------------------------===//
3230// LandingPadInst Class
3231//===----------------------------------------------------------------------===//
3232
3233//===---------------------------------------------------------------------------
3234/// The landingpad instruction holds all of the information
3235/// necessary to generate correct exception handling. The landingpad instruction
3236/// cannot be moved from the top of a landing pad block, which itself is
3237/// accessible only from the 'unwind' edge of an invoke. This uses the
3238/// SubclassData field in Value to store whether or not the landingpad is a
3239/// cleanup.
3240///
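/// A minimal usage sketch (illustrative only; `ResultTy`, `TypeInfo`, and
/// `InsertPt` are assumed to exist in the surrounding code):
/// \code
///   LandingPadInst *LP =
///       LandingPadInst::Create(ResultTy, /*NumReservedClauses=*/1, "lpad",
///                              InsertPt);
///   LP->setCleanup(true);    // run during unwinding even if nothing is caught
///   LP->addClause(TypeInfo); // catch clause for the given type info constant
/// \endcode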
3241class LandingPadInst : public Instruction {
3242 using CleanupField = BoolBitfieldElementT<0>;
3243
3244 /// The number of operands actually allocated. NumOperands is
3245 /// the number actually in use.
3246 unsigned ReservedSpace;
3247
3248 LandingPadInst(const LandingPadInst &LP);
3249
3250public:
3251 enum ClauseType { Catch, Filter };
3252
3253private:
3254 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3255 const Twine &NameStr,
3256 BasicBlock::iterator InsertBefore);
3257 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3258 const Twine &NameStr, Instruction *InsertBefore);
3259 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
3260 const Twine &NameStr, BasicBlock *InsertAtEnd);
3261
3262 // Allocate space for exactly zero operands.
  void *operator new(size_t S) { return User::operator new(S); }
3264
3265 void growOperands(unsigned Size);
3266 void init(unsigned NumReservedValues, const Twine &NameStr);
3267
3268protected:
3269 // Note: Instruction needs to be a friend here to call cloneImpl.
3270 friend class Instruction;
3271
3272 LandingPadInst *cloneImpl() const;
3273
3274public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3276
3277 /// Constructors - NumReservedClauses is a hint for the number of incoming
3278 /// clauses that this landingpad will have (use 0 if you really have no idea).
3279 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3280 const Twine &NameStr,
3281 BasicBlock::iterator InsertBefore);
3282 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3283 const Twine &NameStr = "",
3284 Instruction *InsertBefore = nullptr);
3285 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
3286 const Twine &NameStr, BasicBlock *InsertAtEnd);
3287
3288 /// Provide fast operand accessors
3289 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3290
3291 /// Return 'true' if this landingpad instruction is a
3292 /// cleanup. I.e., it should be run when unwinding even if its landing pad
3293 /// doesn't catch the exception.
3294 bool isCleanup() const { return getSubclassData<CleanupField>(); }
3295
3296 /// Indicate that this landingpad instruction is a cleanup.
3297 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3298
3299 /// Add a catch or filter clause to the landing pad.
3300 void addClause(Constant *ClauseVal);
3301
3302 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
3303 /// determine what type of clause this is.
3304 Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
3306 }
3307
  /// Return 'true' if the clause at index Idx is a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause at index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
3316 }
3317
3318 /// Get the number of clauses for this landing pad.
3319 unsigned getNumClauses() const { return getNumOperands(); }
3320
3321 /// Grow the size of the operand list to accommodate the new
3322 /// number of clauses.
3323 void reserveClauses(unsigned Size) { growOperands(Size); }
3324
3325 // Methods for support type inquiry through isa, cast, and dyn_cast:
3326 static bool classof(const Instruction *I) {
3327 return I->getOpcode() == Instruction::LandingPad;
3328 }
3329 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
3331 }
3332};
3333
3334template <>
3335struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
3336};
3337
3338DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
3339
3340//===----------------------------------------------------------------------===//
3341// ReturnInst Class
3342//===----------------------------------------------------------------------===//
3343
3344//===---------------------------------------------------------------------------
/// Return a value (possibly void) from a function. Execution
3346/// does not continue in this function any longer.
3347///
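/// A minimal usage sketch (illustrative only; `Ctx`, `RetVal`, and `BB` are
/// assumed to exist in the surrounding code):
/// \code
///   ReturnInst::Create(Ctx, RetVal, BB); // 'ret RetVal' appended to BB
///   ReturnInst::Create(Ctx, BB);         // 'ret void' appended to BB
/// \endcode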
3348class ReturnInst : public Instruction {
3349 ReturnInst(const ReturnInst &RI);
3350
3351private:
3352 // ReturnInst constructors:
3353 // ReturnInst() - 'ret void' instruction
3354 // ReturnInst( null) - 'ret void' instruction
3355 // ReturnInst(Value* X) - 'ret X' instruction
3356 // ReturnInst(null, Iterator It) - 'ret void' instruction, insert before I
3357 // ReturnInst(Value* X, Iterator It) - 'ret X' instruction, insert before I
3358 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3359 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3360 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3361 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3362 //
3363 // NOTE: If the Value* passed is of type void then the constructor behaves as
3364 // if it was passed NULL.
3365 explicit ReturnInst(LLVMContext &C, Value *retVal,
3366 BasicBlock::iterator InsertBefore);
3367 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3368 Instruction *InsertBefore = nullptr);
3369 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3370 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3371
3372protected:
3373 // Note: Instruction needs to be a friend here to call cloneImpl.
3374 friend class Instruction;
3375
3376 ReturnInst *cloneImpl() const;
3377
3378public:
3379 static ReturnInst *Create(LLVMContext &C, Value *retVal,
3380 BasicBlock::iterator InsertBefore) {
3381 return new (!!retVal) ReturnInst(C, retVal, InsertBefore);
3382 }
3383
3384 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3385 Instruction *InsertBefore = nullptr) {
3386 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3387 }
3388
3389 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3390 BasicBlock *InsertAtEnd) {
3391 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3392 }
3393
3394 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3395 return new(0) ReturnInst(C, InsertAtEnd);
3396 }
3397
3398 /// Provide fast operand accessors
3399 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3400
3401 /// Convenience accessor. Returns null if there is no return value.
3402 Value *getReturnValue() const {
3403 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3404 }
3405
3406 unsigned getNumSuccessors() const { return 0; }
3407
3408 // Methods for support type inquiry through isa, cast, and dyn_cast:
3409 static bool classof(const Instruction *I) {
3410 return (I->getOpcode() == Instruction::Ret);
3411 }
3412 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
3414 }
3415
3416private:
3417 BasicBlock *getSuccessor(unsigned idx) const {
3418 llvm_unreachable("ReturnInst has no successors!");
3419 }
3420
3421 void setSuccessor(unsigned idx, BasicBlock *B) {
3422 llvm_unreachable("ReturnInst has no successors!");
3423 }
3424};
3425
3426template <>
3427struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3428};
3429
3430DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3431
3432//===----------------------------------------------------------------------===//
3433// BranchInst Class
3434//===----------------------------------------------------------------------===//
3435
3436//===---------------------------------------------------------------------------
3437/// Conditional or Unconditional Branch instruction.
3438///
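/// A minimal usage sketch (illustrative only; `Dest`, `TrueBB`, `FalseBB`,
/// `Cond`, and `BB` are assumed to exist in the surrounding code):
/// \code
///   BranchInst::Create(Dest, BB);                  // 'br Dest' appended to BB
///   BranchInst::Create(TrueBB, FalseBB, Cond, BB); // 'br Cond, TrueBB, FalseBB'
/// \endcode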
3439class BranchInst : public Instruction {
3440 /// Ops list - Branches are strange. The operands are ordered:
3441 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3442 /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative to op_end().
3444 BranchInst(const BranchInst &BI);
3445 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3446 // BranchInst(BB *B) - 'br B'
3447 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3448 // BranchInst(BB* B, Iter It) - 'br B' insert before I
3449 // BranchInst(BB* T, BB *F, Value *C, Iter It) - 'br C, T, F', insert before I
3450 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3451 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3452 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3453 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3454 explicit BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore);
3455 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3456 BasicBlock::iterator InsertBefore);
3457 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3458 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3459 Instruction *InsertBefore = nullptr);
3460 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3461 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3462 BasicBlock *InsertAtEnd);
3463
3464 void AssertOK();
3465
3466protected:
3467 // Note: Instruction needs to be a friend here to call cloneImpl.
3468 friend class Instruction;
3469
3470 BranchInst *cloneImpl() const;
3471
3472public:
3473 /// Iterator type that casts an operand to a basic block.
3474 ///
3475 /// This only makes sense because the successors are stored as adjacent
3476 /// operands for branch instructions.
3477 struct succ_op_iterator
3478 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3479 std::random_access_iterator_tag, BasicBlock *,
3480 ptrdiff_t, BasicBlock *, BasicBlock *> {
3481 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3482
    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3484 BasicBlock *operator->() const { return operator*(); }
3485 };
3486
3487 /// The const version of `succ_op_iterator`.
3488 struct const_succ_op_iterator
3489 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3490 std::random_access_iterator_tag,
3491 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3492 const BasicBlock *> {
3493 explicit const_succ_op_iterator(const_value_op_iterator I)
3494 : iterator_adaptor_base(I) {}
3495
    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3497 const BasicBlock *operator->() const { return operator*(); }
3498 };
3499
3500 static BranchInst *Create(BasicBlock *IfTrue,
3501 BasicBlock::iterator InsertBefore) {
3502 return new(1) BranchInst(IfTrue, InsertBefore);
3503 }
3504
3505 static BranchInst *Create(BasicBlock *IfTrue,
3506 Instruction *InsertBefore = nullptr) {
3507 return new(1) BranchInst(IfTrue, InsertBefore);
3508 }
3509
3510 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3511 Value *Cond, BasicBlock::iterator InsertBefore) {
3512 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3513 }
3514
3515 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3516 Value *Cond, Instruction *InsertBefore = nullptr) {
3517 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3518 }
3519
3520 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3521 return new(1) BranchInst(IfTrue, InsertAtEnd);
3522 }
3523
3524 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3525 Value *Cond, BasicBlock *InsertAtEnd) {
3526 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3527 }
3528
3529 /// Transparently provide more efficient getOperand methods.
3530 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3531
3532 bool isUnconditional() const { return getNumOperands() == 1; }
3533 bool isConditional() const { return getNumOperands() == 3; }
3534
3535 Value *getCondition() const {
3536 assert(isConditional() && "Cannot get condition of an uncond branch!");
3537 return Op<-3>();
3538 }
3539
3540 void setCondition(Value *V) {
3541 assert(isConditional() && "Cannot set condition of unconditional branch!");
3542 Op<-3>() = V;
3543 }
3544
3545 unsigned getNumSuccessors() const { return 1+isConditional(); }
3546
3547 BasicBlock *getSuccessor(unsigned i) const {
3548 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3550 }
3551
3552 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3553 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3554 *(&Op<-1>() - idx) = NewSucc;
3555 }
3556
3557 /// Swap the successors of this branch instruction.
3558 ///
3559 /// Swaps the successors of the branch instruction. This also swaps any
3560 /// branch weight metadata associated with the instruction so that it
3561 /// continues to map correctly to each operand.
3562 void swapSuccessors();
3563
3564 iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
3568 }
3569
3570 iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
3574 }
3575
3576 // Methods for support type inquiry through isa, cast, and dyn_cast:
3577 static bool classof(const Instruction *I) {
3578 return (I->getOpcode() == Instruction::Br);
3579 }
3580 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
3582 }
3583};
3584
3585template <>
3586struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3587};
3588
3589DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3590
3591//===----------------------------------------------------------------------===//
3592// SwitchInst Class
3593//===----------------------------------------------------------------------===//
3594
3595//===---------------------------------------------------------------------------
3596/// Multiway switch
3597///
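/// A minimal usage sketch (illustrative only; `Cond`, `DefaultBB`, `CaseBB`,
/// `I32Ty` (an IntegerType), and `BB` are assumed to exist in the surrounding
/// code):
/// \code
///   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1, BB);
///   SI->addCase(ConstantInt::get(I32Ty, 42), CaseBB);
///   SwitchInst::CaseIt It = SI->findCaseValue(ConstantInt::get(I32Ty, 42));
///   BasicBlock *Succ = It->getCaseSuccessor(); // CaseBB here
/// \endcode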
3598class SwitchInst : public Instruction {
3599 unsigned ReservedSpace;
3600
3601 // Operand[0] = Value to switch on
3602 // Operand[1] = Default basic block destination
3603 // Operand[2n ] = Value to match
3604 // Operand[2n+1] = BasicBlock to go to on match
3605 SwitchInst(const SwitchInst &SI);
3606
3607 /// Create a new switch instruction, specifying a value to switch on and a
3608 /// default destination. The number of additional cases can be specified here
3609 /// to make memory allocation more efficient. This constructor can also
3610 /// auto-insert before another instruction.
3611 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3612 BasicBlock::iterator InsertBefore);
3613
3614 /// Create a new switch instruction, specifying a value to switch on and a
3615 /// default destination. The number of additional cases can be specified here
3616 /// to make memory allocation more efficient. This constructor can also
3617 /// auto-insert before another instruction.
3618 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3619 Instruction *InsertBefore);
3620
3621 /// Create a new switch instruction, specifying a value to switch on and a
3622 /// default destination. The number of additional cases can be specified here
3623 /// to make memory allocation more efficient. This constructor also
3624 /// auto-inserts at the end of the specified BasicBlock.
3625 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3626 BasicBlock *InsertAtEnd);
3627
3628 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }
3630
3631 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3632 void growOperands();
3633
3634protected:
3635 // Note: Instruction needs to be a friend here to call cloneImpl.
3636 friend class Instruction;
3637
3638 SwitchInst *cloneImpl() const;
3639
3640public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3642
3643 // -2
3644 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3645
3646 template <typename CaseHandleT> class CaseIteratorImpl;
3647
3648 /// A handle to a particular switch case. It exposes a convenient interface
3649 /// to both the case value and the successor block.
3650 ///
3651 /// We define this as a template and instantiate it to form both a const and
3652 /// non-const handle.
3653 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3654 class CaseHandleImpl {
3655 // Directly befriend both const and non-const iterators.
3656 friend class SwitchInst::CaseIteratorImpl<
3657 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3658
3659 protected:
3660 // Expose the switch type we're parameterized with to the iterator.
3661 using SwitchInstType = SwitchInstT;
3662
3663 SwitchInstT *SI;
3664 ptrdiff_t Index;
3665
3666 CaseHandleImpl() = default;
3667 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3668
3669 public:
3670 /// Resolves case value for current case.
3671 ConstantIntT *getCaseValue() const {
3672 assert((unsigned)Index < SI->getNumCases() &&
3673 "Index out the number of cases.");
3674 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3675 }
3676
3677 /// Resolves successor for current case.
3678 BasicBlockT *getCaseSuccessor() const {
3679 assert(((unsigned)Index < SI->getNumCases() ||
3680 (unsigned)Index == DefaultPseudoIndex) &&
3681 "Index out the number of cases.");
3682 return SI->getSuccessor(getSuccessorIndex());
3683 }
3684
3685 /// Returns number of current case.
3686 unsigned getCaseIndex() const { return Index; }
3687
3688 /// Returns successor index for current case successor.
3689 unsigned getSuccessorIndex() const {
3690 assert(((unsigned)Index == DefaultPseudoIndex ||
3691 (unsigned)Index < SI->getNumCases()) &&
3692 "Index out the number of cases.");
3693 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3694 }
3695
3696 bool operator==(const CaseHandleImpl &RHS) const {
3697 assert(SI == RHS.SI && "Incompatible operators.");
3698 return Index == RHS.Index;
3699 }
3700 };
3701
3702 using ConstCaseHandle =
3703 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3704
3705 class CaseHandle
3706 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3707 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3708
3709 public:
3710 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3711
3712 /// Sets the new value for current case.
3713 void setValue(ConstantInt *V) const {
3714 assert((unsigned)Index < SI->getNumCases() &&
3715 "Index out the number of cases.");
3716 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3717 }
3718
3719 /// Sets the new successor for current case.
3720 void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
3722 }
3723 };
3724
3725 template <typename CaseHandleT>
3726 class CaseIteratorImpl
3727 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3728 std::random_access_iterator_tag,
3729 const CaseHandleT> {
3730 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3731
3732 CaseHandleT Case;
3733
3734 public:
3735 /// Default constructed iterator is in an invalid state until assigned to
3736 /// a case for a particular switch.
3737 CaseIteratorImpl() = default;
3738
3739 /// Initializes case iterator for given SwitchInst and for given
3740 /// case number.
3741 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3742
3743 /// Initializes case iterator for given SwitchInst and for given
3744 /// successor index.
3745 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3746 unsigned SuccessorIndex) {
3747 assert(SuccessorIndex < SI->getNumSuccessors() &&
3748 "Successor index # out of range!");
3749 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3750 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3751 }
3752
3753 /// Support converting to the const variant. This will be a no-op for const
3754 /// variant.
3755 operator CaseIteratorImpl<ConstCaseHandle>() const {
3756 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3757 }
3758
3759 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3760 // Check index correctness after addition.
3761 // Note: Index == getNumCases() means end().
3762 assert(Case.Index + N >= 0 &&
3763 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3764 "Case.Index out the number of cases.");
3765 Case.Index += N;
3766 return *this;
3767 }
3768 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3769 // Check index correctness after subtraction.
3770 // Note: Case.Index == getNumCases() means end().
3771 assert(Case.Index - N >= 0 &&
3772 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3773 "Case.Index out the number of cases.");
3774 Case.Index -= N;
3775 return *this;
3776 }
3777 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3778 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3779 return Case.Index - RHS.Case.Index;
3780 }
3781 bool operator==(const CaseIteratorImpl &RHS) const {
3782 return Case == RHS.Case;
3783 }
3784 bool operator<(const CaseIteratorImpl &RHS) const {
3785 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3786 return Case.Index < RHS.Case.Index;
3787 }
3788 const CaseHandleT &operator*() const { return Case; }
3789 };
3790
3791 using CaseIt = CaseIteratorImpl<CaseHandle>;
3792 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3793
3794 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3795 unsigned NumCases,
3796 BasicBlock::iterator InsertBefore) {
3797 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3798 }
3799
3800 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3801 unsigned NumCases,
3802 Instruction *InsertBefore = nullptr) {
3803 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3804 }
3805
3806 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3807 unsigned NumCases, BasicBlock *InsertAtEnd) {
3808 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3809 }
3810
3811 /// Provide fast operand accessors
3812 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3813
3814 // Accessor Methods for Switch stmt
3815 Value *getCondition() const { return getOperand(0); }
3816 void setCondition(Value *V) { setOperand(0, V); }
3817
3818 BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
3820 }
3821
3822 /// Returns true if the default branch must result in immediate undefined
3823 /// behavior, false otherwise.
3824 bool defaultDestUndefined() const {
    return isa<UnreachableInst>(getDefaultDest()->getFirstNonPHIOrDbg());
3826 }
3827
3828 void setDefaultDest(BasicBlock *DefaultCase) {
3829 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3830 }
3831
3832 /// Return the number of 'cases' in this switch instruction, excluding the
3833 /// default case.
3834 unsigned getNumCases() const {
3835 return getNumOperands()/2 - 1;
3836 }
3837
3838 /// Returns a read/write iterator that points to the first case in the
3839 /// SwitchInst.
3840 CaseIt case_begin() {
3841 return CaseIt(this, 0);
3842 }
3843
3844 /// Returns a read-only iterator that points to the first case in the
3845 /// SwitchInst.
3846 ConstCaseIt case_begin() const {
3847 return ConstCaseIt(this, 0);
3848 }
3849
  /// Returns a read/write iterator that points one past the last case in the
3851 /// SwitchInst.
3852 CaseIt case_end() {
3853 return CaseIt(this, getNumCases());
3854 }
3855
  /// Returns a read-only iterator that points one past the last case in the
3857 /// SwitchInst.
3858 ConstCaseIt case_end() const {
3859 return ConstCaseIt(this, getNumCases());
3860 }
3861
3862 /// Iteration adapter for range-for loops.
3863 iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
3865 }
3866
3867 /// Constant iteration adapter for range-for loops.
3868 iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
3870 }
3871
  /// Returns an iterator that points to the default case.
  /// Note: this iterator can only resolve the successor; attempting to resolve
  /// the case value triggers an assertion.
  /// Also note that incrementing or decrementing this iterator triggers an
  /// assertion and leaves it in an invalid state.
3877 CaseIt case_default() {
3878 return CaseIt(this, DefaultPseudoIndex);
3879 }
3880 ConstCaseIt case_default() const {
3881 return ConstCaseIt(this, DefaultPseudoIndex);
3882 }
3883
  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator for it; otherwise return the
  /// default case iterator to indicate that it is handled by the default
  /// handler.
3888 CaseIt findCaseValue(const ConstantInt *C) {
3889 return CaseIt(
3890 this,
3891 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3892 }
3893 ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3895 return Case.getCaseValue() == C;
3896 });
3897 if (I != case_end())
3898 return I;
3899
3900 return case_default();
3901 }
3902
3903 /// Finds the unique case value for a given successor. Returns null if the
3904 /// successor is not found, not unique, or is the default case.
3905 ConstantInt *findCaseDest(BasicBlock *BB) {
3906 if (BB == getDefaultDest())
3907 return nullptr;
3908
3909 ConstantInt *CI = nullptr;
3910 for (auto Case : cases()) {
3911 if (Case.getCaseSuccessor() != BB)
3912 continue;
3913
3914 if (CI)
3915 return nullptr; // Multiple cases lead to BB.
3916
3917 CI = Case.getCaseValue();
3918 }
3919
3920 return CI;
3921 }
3922
3923 /// Add an entry to the switch instruction.
3924 /// Note:
  /// This action invalidates case_end(). The old case_end() iterator will
3926 /// point to the added case.
3927 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3928
3929 /// This method removes the specified case and its successor from the switch
3930 /// instruction. Note that this operation may reorder the remaining cases at
3931 /// index idx and above.
3932 /// Note:
3933 /// This action invalidates iterators for all cases following the one removed,
3934 /// including the case_end() iterator. It returns an iterator for the next
3935 /// case.
3936 CaseIt removeCase(CaseIt I);
3937
3938 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3939 BasicBlock *getSuccessor(unsigned idx) const {
3940 assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
3942 }
3943 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3944 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3945 setOperand(idx * 2 + 1, NewSucc);
3946 }
3947
3948 // Methods for support type inquiry through isa, cast, and dyn_cast:
3949 static bool classof(const Instruction *I) {
3950 return I->getOpcode() == Instruction::Switch;
3951 }
3952 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
3954 }
3955};
3956
3957/// A wrapper class to simplify modification of SwitchInst cases along with
3958/// their prof branch_weights metadata.
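///
/// A minimal usage sketch (illustrative only; `SI`, `OnVal`, and `Dest` are
/// assumed to exist in the surrounding code):
/// \code
///   SwitchInstProfUpdateWrapper SIW(*SI);
///   SIW.addCase(OnVal, Dest, /*W=*/10); // branch_weights are rewritten when SIW is destroyed
/// \endcode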
3959class SwitchInstProfUpdateWrapper {
3960 SwitchInst &SI;
3961 std::optional<SmallVector<uint32_t, 8>> Weights;
3962 bool Changed = false;
3963
3964protected:
3965 MDNode *buildProfBranchWeightsMD();
3966
3967 void init();
3968
3969public:
3970 using CaseWeightOpt = std::optional<uint32_t>;
3971 SwitchInst *operator->() { return &SI; }
3972 SwitchInst &operator*() { return SI; }
3973 operator SwitchInst *() { return &SI; }
3974
3975 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3976
3977 ~SwitchInstProfUpdateWrapper() {
3978 if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3980 }
3981
  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// the corresponding branch weight.
3984 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3985
3986 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3987 /// specified branch weight for the added case.
3988 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3989
  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and
  /// mark this object so that it does not touch the underlying SwitchInst in
  /// the destructor.
3992 Instruction::InstListType::iterator eraseFromParent();
3993
3994 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3995 CaseWeightOpt getSuccessorWeight(unsigned idx);
3996
3997 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3998};
3999
4000template <>
4001struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
4002};
4003
4004DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
4005
4006//===----------------------------------------------------------------------===//
4007// IndirectBrInst Class
4008//===----------------------------------------------------------------------===//
4009
4010//===---------------------------------------------------------------------------
4011/// Indirect Branch Instruction.
4012///
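/// A minimal usage sketch (illustrative only; `Target` (a BasicBlock in some
/// function) and `BB` are assumed to exist in the surrounding code):
/// \code
///   Value *Addr = BlockAddress::get(Target);
///   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/1, BB);
///   IBI->addDestination(Target); // every possible target must be listed
/// \endcode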
4013class IndirectBrInst : public Instruction {
4014 unsigned ReservedSpace;
4015
4016 // Operand[0] = Address to jump to
4017 // Operand[n+1] = n-th destination
4018 IndirectBrInst(const IndirectBrInst &IBI);
4019
4020 /// Create a new indirectbr instruction, specifying an
4021 /// Address to jump to. The number of expected destinations can be specified
4022 /// here to make memory allocation more efficient. This constructor can also
4023 /// autoinsert before another instruction.
4024 IndirectBrInst(Value *Address, unsigned NumDests,
4025 BasicBlock::iterator InsertBefore);
4026
4027 /// Create a new indirectbr instruction, specifying an
4028 /// Address to jump to. The number of expected destinations can be specified
4029 /// here to make memory allocation more efficient. This constructor can also
4030 /// autoinsert before another instruction.
4031 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
4032
4033 /// Create a new indirectbr instruction, specifying an
4034 /// Address to jump to. The number of expected destinations can be specified
4035 /// here to make memory allocation more efficient. This constructor also
4036 /// autoinserts at the end of the specified BasicBlock.
4037 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
4038
4039 // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }
4041
4042 void init(Value *Address, unsigned NumDests);
4043 void growOperands();
4044
4045protected:
4046 // Note: Instruction needs to be a friend here to call cloneImpl.
4047 friend class Instruction;
4048
4049 IndirectBrInst *cloneImpl() const;
4050
4051public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
4053
4054 /// Iterator type that casts an operand to a basic block.
4055 ///
4056 /// This only makes sense because the successors are stored as adjacent
4057 /// operands for indirectbr instructions.
4058 struct succ_op_iterator
4059 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
4060 std::random_access_iterator_tag, BasicBlock *,
4061 ptrdiff_t, BasicBlock *, BasicBlock *> {
4062 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
4063
    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
4065 BasicBlock *operator->() const { return operator*(); }
4066 };
4067
4068 /// The const version of `succ_op_iterator`.
4069 struct const_succ_op_iterator
4070 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
4071 std::random_access_iterator_tag,
4072 const BasicBlock *, ptrdiff_t, const BasicBlock *,
4073 const BasicBlock *> {
4074 explicit const_succ_op_iterator(const_value_op_iterator I)
4075 : iterator_adaptor_base(I) {}
4076
    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
4078 const BasicBlock *operator->() const { return operator*(); }
4079 };
4080
4081 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4082 BasicBlock::iterator InsertBefore) {
4083 return new IndirectBrInst(Address, NumDests, InsertBefore);
4084 }
4085
4086 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4087 Instruction *InsertBefore = nullptr) {
4088 return new IndirectBrInst(Address, NumDests, InsertBefore);
4089 }
4090
4091 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
4092 BasicBlock *InsertAtEnd) {
4093 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
4094 }
4095
4096 /// Provide fast operand accessors.
4097 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4098
4099 // Accessor Methods for IndirectBrInst instruction.
4100 Value *getAddress() { return getOperand(0); }
4101 const Value *getAddress() const { return getOperand(0); }
4102 void setAddress(Value *V) { setOperand(0, V); }
4103
4104 /// return the number of possible destinations in this
4105 /// indirectbr instruction.
4106 unsigned getNumDestinations() const { return getNumOperands()-1; }
4107
4108 /// Return the specified destination.
4109 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
4110 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
4111
4112 /// Add a destination.
4113 ///
4114 void addDestination(BasicBlock *Dest);
4115
4116 /// This method removes the specified successor from the
4117 /// indirectbr instruction.
4118 void removeDestination(unsigned i);
4119
4120 unsigned getNumSuccessors() const { return getNumOperands()-1; }
4121 BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
4123 }
4124 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4125 setOperand(i + 1, NewSucc);
4126 }
4127
4128 iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
4131 }
4132
4133 iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
4136 }
4137
4138 // Methods for support type inquiry through isa, cast, and dyn_cast:
4139 static bool classof(const Instruction *I) {
4140 return I->getOpcode() == Instruction::IndirectBr;
4141 }
4142 static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4144 }
4145};
4146
4147template <>
4148struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
4149};
4150
4151DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
4152
4153//===----------------------------------------------------------------------===//
4154// InvokeInst Class
4155//===----------------------------------------------------------------------===//
4156
4157/// Invoke instruction. The SubclassData field is used to hold the
4158/// calling convention of the call.
4159///
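/// A minimal usage sketch (illustrative only; `Callee` (a FunctionCallee),
/// `NormalBB`, `UnwindBB`, `Args`, and `BB` are assumed to exist in the
/// surrounding code):
/// \code
///   InvokeInst *II =
///       InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "call", BB);
/// \endcode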
4160class InvokeInst : public CallBase {
4161 /// The number of operands for this call beyond the called function,
4162 /// arguments, and operand bundles.
4163 static constexpr int NumExtraOperands = 2;
4164
4165 /// The index from the end of the operand array to the normal destination.
4166 static constexpr int NormalDestOpEndIdx = -3;
4167
4168 /// The index from the end of the operand array to the unwind destination.
4169 static constexpr int UnwindDestOpEndIdx = -2;
4170
4171 InvokeInst(const InvokeInst &BI);
4172
4173 /// Construct an InvokeInst given a range of arguments.
4174 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4175 BasicBlock *IfException, ArrayRef<Value *> Args,
4176 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4177 const Twine &NameStr, BasicBlock::iterator InsertBefore);
4178
  /// Construct an InvokeInst given a range of arguments.
4182 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4183 BasicBlock *IfException, ArrayRef<Value *> Args,
4184 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4185 const Twine &NameStr, Instruction *InsertBefore);
4186
4187 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4188 BasicBlock *IfException, ArrayRef<Value *> Args,
4189 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4190 const Twine &NameStr, BasicBlock *InsertAtEnd);
4191
4192 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4193 BasicBlock *IfException, ArrayRef<Value *> Args,
4194 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4195
4196 /// Compute the number of operands to allocate.
4197 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
4198 // We need one operand for the called function, plus our extra operands and
4199 // the input operand counts provided.
4200 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
4201 }
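  // Worked example (illustrative): an invoke of a callee taking two arguments
  // with no operand bundles allocates 1 (callee) + 2 (normal and unwind
  // destinations) + 2 (arguments) = 5 operands.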
4202
4203protected:
4204 // Note: Instruction needs to be a friend here to call cloneImpl.
4205 friend class Instruction;
4206
4207 InvokeInst *cloneImpl() const;
4208
4209public:
4210 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4211 BasicBlock *IfException, ArrayRef<Value *> Args,
4212 const Twine &NameStr,
4213 BasicBlock::iterator InsertBefore) {
    int NumOperands = ComputeNumOperands(Args.size());
4215 return new (NumOperands)
4216 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4217 NumOperands, NameStr, InsertBefore);
4218 }
4219
4220 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4221 BasicBlock *IfException, ArrayRef<Value *> Args,
4222 const Twine &NameStr,
4223 Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
4225 return new (NumOperands)
4226 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4227 NumOperands, NameStr, InsertBefore);
4228 }
4229
4230 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4231 BasicBlock *IfException, ArrayRef<Value *> Args,
4232 ArrayRef<OperandBundleDef> Bundles,
4233 const Twine &NameStr,
4234 BasicBlock::iterator InsertBefore) {
4235 int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4237 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4238
4239 return new (NumOperands, DescriptorBytes)
4240 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4241 NameStr, InsertBefore);
4242 }
4243
4244 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4245 BasicBlock *IfException, ArrayRef<Value *> Args,
4246 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4247 const Twine &NameStr = "",
4248 Instruction *InsertBefore = nullptr) {
4249 int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4251 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4252
4253 return new (NumOperands, DescriptorBytes)
4254 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4255 NameStr, InsertBefore);
4256 }
4257
4258 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4259 BasicBlock *IfException, ArrayRef<Value *> Args,
4260 const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
4262 return new (NumOperands)
4263 InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt,
4264 NumOperands, NameStr, InsertAtEnd);
4265 }
4266
4267 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4268 BasicBlock *IfException, ArrayRef<Value *> Args,
4269 ArrayRef<OperandBundleDef> Bundles,
4270 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4271 int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
4273 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4274
4275 return new (NumOperands, DescriptorBytes)
4276 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
4277 NameStr, InsertAtEnd);
4278 }
4279
4280 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4281 BasicBlock *IfException, ArrayRef<Value *> Args,
4282 const Twine &NameStr,
4283 BasicBlock::iterator InsertBefore) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
4286 }
4287
4288 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4289 BasicBlock *IfException, ArrayRef<Value *> Args,
4290 const Twine &NameStr,
4291 Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, std::nullopt, NameStr, InsertBefore);
4294 }
4295
4296 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4297 BasicBlock *IfException, ArrayRef<Value *> Args,
4298 ArrayRef<OperandBundleDef> Bundles,
4299 const Twine &NameStr,
4300 BasicBlock::iterator InsertBefore) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
4303 }
4304
4305 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4306 BasicBlock *IfException, ArrayRef<Value *> Args,
4307 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4308 const Twine &NameStr = "",
4309 Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
4312 }
4313
4314 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4315 BasicBlock *IfException, ArrayRef<Value *> Args,
4316 const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
4319 }
4320
4321 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
4322 BasicBlock *IfException, ArrayRef<Value *> Args,
4323 ArrayRef<OperandBundleDef> Bundles,
4324 const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
4327 }
4328
4329 /// Create a clone of \p II with a different set of operand bundles and
4330 /// insert it before \p InsertPt.
4331 ///
4332 /// The returned invoke instruction is identical to \p II in every way except
4333 /// that the operand bundles for the new instruction are set to the operand
4334 /// bundles in \p Bundles.
4335 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
4336 BasicBlock::iterator InsertPt);
4337 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
4338 Instruction *InsertPt = nullptr);
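
  // Illustrative sketch only (not part of the API declared here): assuming an
  // existing `InvokeInst *II`, a replacement bundle list `NewBundles`, and a
  // block `NewUnwindBB`, the clone-with-bundles factory above combines with
  // the destination accessors below:
  //
  //   InvokeInst *NewII = InvokeInst::Create(II, NewBundles, II->getIterator());
  //   NewII->setUnwindDest(NewUnwindBB);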
4339
4340 // get*Dest - Return the destination basic blocks...
4341 BasicBlock *getNormalDest() const {
4342    return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
4343 }
4344 BasicBlock *getUnwindDest() const {
4345    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
4346 }
4347 void setNormalDest(BasicBlock *B) {
4348 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
4349 }
4350 void setUnwindDest(BasicBlock *B) {
4351 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
4352 }
4353
4354 /// Get the landingpad instruction from the landing pad
4355 /// block (the unwind destination).
4356 LandingPadInst *getLandingPadInst() const;
4357
4358 BasicBlock *getSuccessor(unsigned i) const {
4359 assert(i < 2 && "Successor # out of range for invoke!");
4360 return i == 0 ? getNormalDest() : getUnwindDest();
4361 }
4362
4363 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4364 assert(i < 2 && "Successor # out of range for invoke!");
4365 if (i == 0)
4366 setNormalDest(NewSucc);
4367 else
4368 setUnwindDest(NewSucc);
4369 }
4370
4371 unsigned getNumSuccessors() const { return 2; }
4372
4373 // Methods for support type inquiry through isa, cast, and dyn_cast:
4374 static bool classof(const Instruction *I) {
4375 return (I->getOpcode() == Instruction::Invoke);
4376 }
4377 static bool classof(const Value *V) {
4378    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4379 }
4380
4381private:
4382 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4383 // method so that subclasses cannot accidentally use it.
4384 template <typename Bitfield>
4385 void setSubclassData(typename Bitfield::Type Value) {
4386 Instruction::setSubclassData<Bitfield>(Value);
4387 }
4388};
4389
4390InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4391 BasicBlock *IfException, ArrayRef<Value *> Args,
4392 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4393 const Twine &NameStr, BasicBlock::iterator InsertBefore)
4394 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4395               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4396 InsertBefore) {
4397 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4398}
4399
4400InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4401 BasicBlock *IfException, ArrayRef<Value *> Args,
4402 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4403 const Twine &NameStr, Instruction *InsertBefore)
4404 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4405               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4406 InsertBefore) {
4407 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4408}
4409
4410InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
4411 BasicBlock *IfException, ArrayRef<Value *> Args,
4412 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4413 const Twine &NameStr, BasicBlock *InsertAtEnd)
4414 : CallBase(Ty->getReturnType(), Instruction::Invoke,
4415               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4416 InsertAtEnd) {
4417 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
4418}
4419
4420//===----------------------------------------------------------------------===//
4421// CallBrInst Class
4422//===----------------------------------------------------------------------===//
4423
4424/// CallBr instruction, tracking function calls that may not return control but
4425/// instead transfer it to a third location. The SubclassData field is used to
4426/// hold the calling convention of the call.
4427///
4428class CallBrInst : public CallBase {
4429
4430 unsigned NumIndirectDests;
4431
4432 CallBrInst(const CallBrInst &BI);
4433
4434 /// Construct a CallBrInst given a range of arguments.
4435 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4436 ArrayRef<BasicBlock *> IndirectDests,
4437 ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
4438 int NumOperands, const Twine &NameStr,
4439 BasicBlock::iterator InsertBefore);
4440
4441  /// Construct a CallBrInst given a range of arguments.
4444 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4445 ArrayRef<BasicBlock *> IndirectDests,
4446 ArrayRef<Value *> Args,
4447 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4448 const Twine &NameStr, Instruction *InsertBefore);
4449
4450 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4451 ArrayRef<BasicBlock *> IndirectDests,
4452 ArrayRef<Value *> Args,
4453 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4454 const Twine &NameStr, BasicBlock *InsertAtEnd);
4455
4456 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
4457 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4458 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
4459
4460 /// Compute the number of operands to allocate.
4461 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4462 int NumBundleInputs = 0) {
4463    // We need one operand for the callee and one for the default destination,
4464    // plus the indirect destination, argument and bundle input counts provided.
4465 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4466 }
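
  // For example (illustration only): a callbr with 3 call arguments, 2
  // indirect destinations and no bundle operands allocates
  // 2 + 2 + 3 + 0 = 7 operands -- the callee, the default destination, the
  // two indirect destinations and the three arguments.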
4467
4468protected:
4469 // Note: Instruction needs to be a friend here to call cloneImpl.
4470 friend class Instruction;
4471
4472 CallBrInst *cloneImpl() const;
4473
4474public:
4475 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4476 BasicBlock *DefaultDest,
4477 ArrayRef<BasicBlock *> IndirectDests,
4478 ArrayRef<Value *> Args, const Twine &NameStr,
4479 BasicBlock::iterator InsertBefore) {
4480    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4481 return new (NumOperands)
4482 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4483 NumOperands, NameStr, InsertBefore);
4484 }
4485
4486 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4487 BasicBlock *DefaultDest,
4488 ArrayRef<BasicBlock *> IndirectDests,
4489 ArrayRef<Value *> Args, const Twine &NameStr,
4490 Instruction *InsertBefore = nullptr) {
4491    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4492 return new (NumOperands)
4493 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4494 NumOperands, NameStr, InsertBefore);
4495 }
4496
4497 static CallBrInst *
4498 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4499 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
4501 BasicBlock::iterator InsertBefore) {
4502    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4503                                         CountBundleInputs(Bundles));
4504 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4505
4506 return new (NumOperands, DescriptorBytes)
4507 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4508 NumOperands, NameStr, InsertBefore);
4509 }
4510
4511 static CallBrInst *
4512 Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4513 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
4514 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4515 const Twine &NameStr = "", Instruction *InsertBefore = nullptr) {
4516    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4517                                         CountBundleInputs(Bundles));
4518 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4519
4520 return new (NumOperands, DescriptorBytes)
4521 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4522 NumOperands, NameStr, InsertBefore);
4523 }
4524
4525 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4526 BasicBlock *DefaultDest,
4527 ArrayRef<BasicBlock *> IndirectDests,
4528 ArrayRef<Value *> Args, const Twine &NameStr,
4529 BasicBlock *InsertAtEnd) {
4530    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4531 return new (NumOperands)
4532 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt,
4533 NumOperands, NameStr, InsertAtEnd);
4534 }
4535
4536 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4537 BasicBlock *DefaultDest,
4538 ArrayRef<BasicBlock *> IndirectDests,
4539 ArrayRef<Value *> Args,
4540 ArrayRef<OperandBundleDef> Bundles,
4541 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4542    int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4543                                         CountBundleInputs(Bundles));
4544 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4545
4546 return new (NumOperands, DescriptorBytes)
4547 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4548 NumOperands, NameStr, InsertAtEnd);
4549 }
4550
4551 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4552 ArrayRef<BasicBlock *> IndirectDests,
4553 ArrayRef<Value *> Args, const Twine &NameStr,
4554 BasicBlock::iterator InsertBefore) {
4555    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4556 IndirectDests, Args, NameStr, InsertBefore);
4557 }
4558
4559 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4560 ArrayRef<BasicBlock *> IndirectDests,
4561 ArrayRef<Value *> Args, const Twine &NameStr,
4562 Instruction *InsertBefore = nullptr) {
4563    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4564 IndirectDests, Args, NameStr, InsertBefore);
4565 }
4566
4567 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4568 ArrayRef<BasicBlock *> IndirectDests,
4569 ArrayRef<Value *> Args,
4570 ArrayRef<OperandBundleDef> Bundles,
4571 const Twine &NameStr,
4572 BasicBlock::iterator InsertBefore) {
4573    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4574 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4575 }
4576
4577 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4578 ArrayRef<BasicBlock *> IndirectDests,
4579 ArrayRef<Value *> Args,
4580 ArrayRef<OperandBundleDef> Bundles = std::nullopt,
4581 const Twine &NameStr = "",
4582 Instruction *InsertBefore = nullptr) {
4583    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4584 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4585 }
4586
4587 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4588 ArrayRef<BasicBlock *> IndirectDests,
4589 ArrayRef<Value *> Args, const Twine &NameStr,
4590 BasicBlock *InsertAtEnd) {
4591    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4592 IndirectDests, Args, NameStr, InsertAtEnd);
4593 }
4594
4595 static CallBrInst *Create(FunctionCallee Func,
4596 BasicBlock *DefaultDest,
4597 ArrayRef<BasicBlock *> IndirectDests,
4598 ArrayRef<Value *> Args,
4599 ArrayRef<OperandBundleDef> Bundles,
4600 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4601    return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4602 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4603 }
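
  // Illustrative sketch only (not part of the API declared here): assuming a
  // FunctionCallee `Callee`, a fall-through block `DefaultBB`, indirect
  // destination blocks `IndirectBBs`, argument values `Args` and an insertion
  // block `BB`, a callbr is created and its destinations inspected like so:
  //
  //   CallBrInst *CBI =
  //       CallBrInst::Create(Callee, DefaultBB, IndirectBBs, Args, "", BB);
  //   for (BasicBlock *Dest : CBI->getIndirectDests())
  //     (void)Dest->getName();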
4604
4605 /// Create a clone of \p CBI with a different set of operand bundles and
4606 /// insert it before \p InsertPt.
4607 ///
4608 /// The returned callbr instruction is identical to \p CBI in every way
4609 /// except that the operand bundles for the new instruction are set to the
4610 /// operand bundles in \p Bundles.
4611 static CallBrInst *Create(CallBrInst *CBI, ArrayRef<OperandBundleDef> Bundles,
4612 BasicBlock::iterator InsertPt);
4613 static CallBrInst *Create(CallBrInst *CBI,
4614 ArrayRef<OperandBundleDef> Bundles,
4615 Instruction *InsertPt = nullptr);
4616
4617 /// Return the number of callbr indirect dest labels.
4618 ///
4619 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4620
4621 /// getIndirectDestLabel - Return the i-th indirect dest label.
4622 ///
4623 Value *getIndirectDestLabel(unsigned i) const {
4624 assert(i < getNumIndirectDests() && "Out of bounds!");
4625    return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4626 }
4627
4628 Value *getIndirectDestLabelUse(unsigned i) const {
4629 assert(i < getNumIndirectDests() && "Out of bounds!");
4630    return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4631 }
4632
4633 // Return the destination basic blocks...
4634 BasicBlock *getDefaultDest() const {
4635    return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4636 }
4637 BasicBlock *getIndirectDest(unsigned i) const {
4638    return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4639 }
4640 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4641 SmallVector<BasicBlock *, 16> IndirectDests;
4642 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4643      IndirectDests.push_back(getIndirectDest(i));
4644 return IndirectDests;
4645 }
4646 void setDefaultDest(BasicBlock *B) {
4647 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4648 }
4649 void setIndirectDest(unsigned i, BasicBlock *B) {
4650 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4651 }
4652
4653 BasicBlock *getSuccessor(unsigned i) const {
4654    assert(i < getNumSuccessors() &&
4655           "Successor # out of range for callbr!");
4656    return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4657 }
4658
4659 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4660 assert(i < getNumIndirectDests() + 1 &&
4661 "Successor # out of range for callbr!");
4662    return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4663 }
4664
4665 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4666
4667 // Methods for support type inquiry through isa, cast, and dyn_cast:
4668 static bool classof(const Instruction *I) {
4669 return (I->getOpcode() == Instruction::CallBr);
4670 }
4671 static bool classof(const Value *V) {
4672    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4673 }
4674
4675private:
4676 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4677 // method so that subclasses cannot accidentally use it.
4678 template <typename Bitfield>
4679 void setSubclassData(typename Bitfield::Type Value) {
4680 Instruction::setSubclassData<Bitfield>(Value);
4681 }
4682};
4683
4684CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4685 ArrayRef<BasicBlock *> IndirectDests,
4686 ArrayRef<Value *> Args,
4687 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4688 const Twine &NameStr, BasicBlock::iterator InsertBefore)
4689 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4690               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4691 InsertBefore) {
4692  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4693}
4694
4695CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4696 ArrayRef<BasicBlock *> IndirectDests,
4697 ArrayRef<Value *> Args,
4698 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4699 const Twine &NameStr, Instruction *InsertBefore)
4700 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4701               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4702 InsertBefore) {
4703  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4704}
4705
4706CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4707 ArrayRef<BasicBlock *> IndirectDests,
4708 ArrayRef<Value *> Args,
4709 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4710 const Twine &NameStr, BasicBlock *InsertAtEnd)
4711 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4712               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4713 InsertAtEnd) {
4714  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4715}
4716
4717//===----------------------------------------------------------------------===//
4718// ResumeInst Class
4719//===----------------------------------------------------------------------===//
4720
4721//===---------------------------------------------------------------------------
4722/// Resume the propagation of an exception.
4723///
4724class ResumeInst : public Instruction {
4725 ResumeInst(const ResumeInst &RI);
4726
4727 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4728 explicit ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore);
4729 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4730
4731protected:
4732 // Note: Instruction needs to be a friend here to call cloneImpl.
4733 friend class Instruction;
4734
4735 ResumeInst *cloneImpl() const;
4736
4737public:
4738 static ResumeInst *Create(Value *Exn, BasicBlock::iterator InsertBefore) {
4739 return new (1) ResumeInst(Exn, InsertBefore);
4740 }
4741
4742 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4743 return new(1) ResumeInst(Exn, InsertBefore);
4744 }
4745
4746 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4747 return new(1) ResumeInst(Exn, InsertAtEnd);
4748 }
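
  // Illustrative sketch only (not part of the API declared here): re-raising
  // an in-flight exception at the end of a landing-pad block, assuming `LPad`
  // is the block's LandingPadInst and `LPadBB` is that block:
  //
  //   ResumeInst::Create(LPad, LPadBB);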
4749
4750 /// Provide fast operand accessors
4751 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4752
4753 /// Convenience accessor.
4754 Value *getValue() const { return Op<0>(); }
4755
4756 unsigned getNumSuccessors() const { return 0; }
4757
4758 // Methods for support type inquiry through isa, cast, and dyn_cast:
4759 static bool classof(const Instruction *I) {
4760 return I->getOpcode() == Instruction::Resume;
4761 }
4762 static bool classof(const Value *V) {
4763    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4764 }
4765
4766private:
4767 BasicBlock *getSuccessor(unsigned idx) const {
4768 llvm_unreachable("ResumeInst has no successors!");
4769 }
4770
4771 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4772 llvm_unreachable("ResumeInst has no successors!");
4773 }
4774};
4775
4776template <>
4777struct OperandTraits<ResumeInst> :
4778 public FixedNumOperandTraits<ResumeInst, 1> {
4779};
4780
4781DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4782
4783//===----------------------------------------------------------------------===//
4784// CatchSwitchInst Class
4785//===----------------------------------------------------------------------===//
4786class CatchSwitchInst : public Instruction {
4787 using UnwindDestField = BoolBitfieldElementT<0>;
4788
4789 /// The number of operands actually allocated. NumOperands is
4790 /// the number actually in use.
4791 unsigned ReservedSpace;
4792
4793 // Operand[0] = Outer scope
4794 // Operand[1] = Unwind block destination
4795 // Operand[n] = BasicBlock to go to on match
4796 CatchSwitchInst(const CatchSwitchInst &CSI);
4797
4798  /// Create a new catchswitch instruction, specifying the parent pad and an
4799  /// optional unwind destination. The number of expected handlers can be
4800  /// specified here to make memory allocation more efficient.
4801  /// This constructor can also autoinsert before another instruction.
4802 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4803 unsigned NumHandlers, const Twine &NameStr,
4804 BasicBlock::iterator InsertBefore);
4805
4806  /// Create a new catchswitch instruction, specifying the parent pad and an
4807  /// optional unwind destination. The number of expected handlers can be
4808  /// specified here to make memory allocation more efficient.
4809  /// This constructor can also autoinsert before another instruction.
4810 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4811 unsigned NumHandlers, const Twine &NameStr,
4812 Instruction *InsertBefore);
4813
4814  /// Create a new catchswitch instruction, specifying the parent pad and an
4815  /// optional unwind destination. The number of expected handlers can be
4816  /// specified here to make memory allocation more efficient.
4817  /// This constructor also autoinserts at the end of the specified BasicBlock.
4818 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4819 unsigned NumHandlers, const Twine &NameStr,
4820 BasicBlock *InsertAtEnd);
4821
4822 // allocate space for exactly zero operands
4823  void *operator new(size_t S) { return User::operator new(S); }
4824
4825 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4826 void growOperands(unsigned Size);
4827
4828protected:
4829 // Note: Instruction needs to be a friend here to call cloneImpl.
4830 friend class Instruction;
4831
4832 CatchSwitchInst *cloneImpl() const;
4833
4834public:
4835  void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4836
4837 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4838 unsigned NumHandlers, const Twine &NameStr,
4839 BasicBlock::iterator InsertBefore) {
4840 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4841 InsertBefore);
4842 }
4843
4844 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4845 unsigned NumHandlers,
4846 const Twine &NameStr = "",
4847 Instruction *InsertBefore = nullptr) {
4848 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4849 InsertBefore);
4850 }
4851
4852 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4853 unsigned NumHandlers, const Twine &NameStr,
4854 BasicBlock *InsertAtEnd) {
4855 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4856 InsertAtEnd);
4857 }
4858
4859 /// Provide fast operand accessors
4860 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4861
4862 // Accessor Methods for CatchSwitch stmt
4863 Value *getParentPad() const { return getOperand(0); }
4864 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4865
4866  // Accessor methods for the unwind destination
4867 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4868 bool unwindsToCaller() const { return !hasUnwindDest(); }
4869 BasicBlock *getUnwindDest() const {
4870 if (hasUnwindDest())
4871      return cast<BasicBlock>(getOperand(1));
4872 return nullptr;
4873 }
4874 void setUnwindDest(BasicBlock *UnwindDest) {
4875 assert(UnwindDest);
4876 assert(hasUnwindDest());
4877 setOperand(1, UnwindDest);
4878 }
4879
4880  /// Return the number of 'handlers' in this catchswitch instruction,
4881  /// not counting the unwind destination (if any).
4882 unsigned getNumHandlers() const {
4883 if (hasUnwindDest())
4884 return getNumOperands() - 2;
4885 return getNumOperands() - 1;
4886 }
4887
4888private:
4889  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4890 static const BasicBlock *handler_helper(const Value *V) {
4891    return cast<BasicBlock>(V);
4892 }
4893
4894public:
4895 using DerefFnTy = BasicBlock *(*)(Value *);
4896 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4897 using handler_range = iterator_range<handler_iterator>;
4898 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4899 using const_handler_iterator =
4900 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4901 using const_handler_range = iterator_range<const_handler_iterator>;
4902
4903 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4904 handler_iterator handler_begin() {
4905 op_iterator It = op_begin() + 1;
4906 if (hasUnwindDest())
4907 ++It;
4908 return handler_iterator(It, DerefFnTy(handler_helper));
4909 }
4910
4911 /// Returns an iterator that points to the first handler in the
4912 /// CatchSwitchInst.
4913 const_handler_iterator handler_begin() const {
4914 const_op_iterator It = op_begin() + 1;
4915 if (hasUnwindDest())
4916 ++It;
4917 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4918 }
4919
4920  /// Returns an iterator that points one past the last handler in the
4921  /// CatchSwitchInst.
4922 handler_iterator handler_end() {
4923 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4924 }
4925
4926 /// Returns an iterator that points one past the last handler in the
4927 /// CatchSwitchInst.
4928 const_handler_iterator handler_end() const {
4929 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4930 }
4931
4932 /// iteration adapter for range-for loops.
4933 handler_range handlers() {
4934    return make_range(handler_begin(), handler_end());
4935 }
4936
4937 /// iteration adapter for range-for loops.
4938 const_handler_range handlers() const {
4939    return make_range(handler_begin(), handler_end());
4940 }
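
  // Illustrative sketch only (not part of the API declared here): visiting
  // every handler block of an existing `CatchSwitchInst *CSI`:
  //
  //   for (BasicBlock *Handler : CSI->handlers())
  //     (void)Handler->getName();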
4941
4942  /// Add an entry (handler) to this catchswitch instruction.
4943  /// Note:
4944  /// This action invalidates handler_end(). The old handler_end() iterator
4945  /// will point to the added handler.
4946 void addHandler(BasicBlock *Dest);
4947
4948 void removeHandler(handler_iterator HI);
4949
4950 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4951 BasicBlock *getSuccessor(unsigned Idx) const {
4952 assert(Idx < getNumSuccessors() &&
4953 "Successor # out of range for catchswitch!");
4954    return cast<BasicBlock>(getOperand(Idx + 1));
4955 }
4956 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4957 assert(Idx < getNumSuccessors() &&
4958 "Successor # out of range for catchswitch!");
4959 setOperand(Idx + 1, NewSucc);
4960 }
4961
4962 // Methods for support type inquiry through isa, cast, and dyn_cast:
4963 static bool classof(const Instruction *I) {
4964 return I->getOpcode() == Instruction::CatchSwitch;
4965 }
4966 static bool classof(const Value *V) {
4967    return isa<Instruction>(V) && classof(cast<Instruction>(V));
4968 }
4969};
4970
4971template <>
4972struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4973
4974DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4975
4976//===----------------------------------------------------------------------===//
4977// CleanupPadInst Class
4978//===----------------------------------------------------------------------===//
4979class CleanupPadInst : public FuncletPadInst {
4980private:
4981 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4982 unsigned Values, const Twine &NameStr,
4983 BasicBlock::iterator InsertBefore)
4984 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4985 NameStr, InsertBefore) {}
4986 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4987 unsigned Values, const Twine &NameStr,
4988 Instruction *InsertBefore)
4989 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4990 NameStr, InsertBefore) {}
4991 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4992 unsigned Values, const Twine &NameStr,
4993 BasicBlock *InsertAtEnd)
4994 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4995 NameStr, InsertAtEnd) {}
4996
4997public:
4998 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4999 const Twine &NameStr,
5000 BasicBlock::iterator InsertBefore) {
5001 unsigned Values = 1 + Args.size();
5002 return new (Values)
5003 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
5004 }
5005
5006 static CleanupPadInst *Create(Value *ParentPad,
5007 ArrayRef<Value *> Args = std::nullopt,
5008 const Twine &NameStr = "",
5009 Instruction *InsertBefore = nullptr) {
5010 unsigned Values = 1 + Args.size();
5011 return new (Values)
5012 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
5013 }
5014
5015 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
5016 const Twine &NameStr, BasicBlock *InsertAtEnd) {
5017 unsigned Values = 1 + Args.size();
5018 return new (Values)
5019 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
5020 }
5021
5022 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5023 static bool classof(const Instruction *I) {
5024 return I->getOpcode() == Instruction::CleanupPad;
5025 }
5026 static bool classof(const Value *V) {
5027    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5028 }
5029};
5030
5031//===----------------------------------------------------------------------===//
5032// CatchPadInst Class
5033//===----------------------------------------------------------------------===//
5034class CatchPadInst : public FuncletPadInst {
5035private:
5036 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
5037 unsigned Values, const Twine &NameStr,
5038 BasicBlock::iterator InsertBefore)
5039 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
5040 NameStr, InsertBefore) {}
5041 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
5042 unsigned Values, const Twine &NameStr,
5043 Instruction *InsertBefore)
5044 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
5045 NameStr, InsertBefore) {}
5046 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
5047 unsigned Values, const Twine &NameStr,
5048 BasicBlock *InsertAtEnd)
5049 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
5050 NameStr, InsertAtEnd) {}
5051
5052public:
5053 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
5054 const Twine &NameStr,
5055 BasicBlock::iterator InsertBefore) {
5056 unsigned Values = 1 + Args.size();
5057 return new (Values)
5058 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
5059 }
5060
5061 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
5062 const Twine &NameStr = "",
5063 Instruction *InsertBefore = nullptr) {
5064 unsigned Values = 1 + Args.size();
5065 return new (Values)
5066 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
5067 }
5068
5069 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
5070 const Twine &NameStr, BasicBlock *InsertAtEnd) {
5071 unsigned Values = 1 + Args.size();
5072 return new (Values)
5073 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
5074 }
5075
5076 /// Convenience accessors
5077 CatchSwitchInst *getCatchSwitch() const {
5078    return cast<CatchSwitchInst>(Op<-1>());
5079 }
5080 void setCatchSwitch(Value *CatchSwitch) {
5081 assert(CatchSwitch);
5082 Op<-1>() = CatchSwitch;
5083 }
5084
5085 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5086 static bool classof(const Instruction *I) {
5087 return I->getOpcode() == Instruction::CatchPad;
5088 }
5089 static bool classof(const Value *V) {
5090    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5091 }
5092};
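
// Illustrative sketch only (not part of the API declared here): the usual
// funclet skeleton built from the classes above, assuming `ParentPad` (for a
// top-level funclet, ConstantTokenNone), a dispatch block `DispatchBB` and a
// handler block `HandlerBB` already exist:
//
//   auto *CSI = CatchSwitchInst::Create(ParentPad, /*UnwindDest=*/nullptr,
//                                       /*NumHandlers=*/1, "cs", DispatchBB);
//   CSI->addHandler(HandlerBB);
//   auto *CPI = CatchPadInst::Create(CSI, /*Args=*/{}, "cp", HandlerBB);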
5093
5094//===----------------------------------------------------------------------===//
5095// CatchReturnInst Class
5096//===----------------------------------------------------------------------===//
5097
5098class CatchReturnInst : public Instruction {
5099 CatchReturnInst(const CatchReturnInst &RI);
5100 CatchReturnInst(Value *CatchPad, BasicBlock *BB,
5101 BasicBlock::iterator InsertBefore);
5102 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
5103 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
5104
5105 void init(Value *CatchPad, BasicBlock *BB);
5106
5107protected:
5108 // Note: Instruction needs to be a friend here to call cloneImpl.
5109 friend class Instruction;
5110
5111 CatchReturnInst *cloneImpl() const;
5112
5113public:
5114 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
5115 BasicBlock::iterator InsertBefore) {
5116 assert(CatchPad);
5117 assert(BB);
5118 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
5119 }
5120
5121 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
5122 Instruction *InsertBefore = nullptr) {
5123 assert(CatchPad);
5124 assert(BB);
5125 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
5126 }
5127
5128 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
5129 BasicBlock *InsertAtEnd) {
5130 assert(CatchPad);
5131 assert(BB);
5132 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
5133 }
5134
5135 /// Provide fast operand accessors
5136 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
5137
5138 /// Convenience accessors.
5139  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
5140 void setCatchPad(CatchPadInst *CatchPad) {
5141 assert(CatchPad);
5142 Op<0>() = CatchPad;
5143 }
5144
5145  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
5146 void setSuccessor(BasicBlock *NewSucc) {
5147 assert(NewSucc);
5148 Op<1>() = NewSucc;
5149 }
5150 unsigned getNumSuccessors() const { return 1; }
5151
5152 /// Get the parentPad of this catchret's catchpad's catchswitch.
5153 /// The successor block is implicitly a member of this funclet.
5154 Value *getCatchSwitchParentPad() const {
5155 return getCatchPad()->getCatchSwitch()->getParentPad();
5156 }
5157
5158 // Methods for support type inquiry through isa, cast, and dyn_cast:
5159 static bool classof(const Instruction *I) {
5160 return (I->getOpcode() == Instruction::CatchRet);
5161 }
5162 static bool classof(const Value *V) {
5163    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5164 }
5165
5166private:
5167 BasicBlock *getSuccessor(unsigned Idx) const {
5168 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
5169 return getSuccessor();
5170 }
5171
5172 void setSuccessor(unsigned Idx, BasicBlock *B) {
5173 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
5174 setSuccessor(B);
5175 }
5176};
5177
5178template <>
5179struct OperandTraits<CatchReturnInst>
5180 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
5181
5182DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
5183
5184//===----------------------------------------------------------------------===//
5185// CleanupReturnInst Class
5186//===----------------------------------------------------------------------===//
5187
5188class CleanupReturnInst : public Instruction {
5189 using UnwindDestField = BoolBitfieldElementT<0>;
5190
5191private:
5192 CleanupReturnInst(const CleanupReturnInst &RI);
5193 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
5194 BasicBlock::iterator InsertBefore);
5195 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
5196 Instruction *InsertBefore = nullptr);
5197 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
5198 BasicBlock *InsertAtEnd);
5199
5200 void init(Value *CleanupPad, BasicBlock *UnwindBB);
5201
5202protected:
5203 // Note: Instruction needs to be a friend here to call cloneImpl.
5204 friend class Instruction;
5205
5206 CleanupReturnInst *cloneImpl() const;
5207
5208public:
5209 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
5210 BasicBlock::iterator InsertBefore) {
5211 assert(CleanupPad);
5212 unsigned Values = 1;
5213 if (UnwindBB)
5214 ++Values;
5215 return new (Values)
5216 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
5217 }
5218
5219 static CleanupReturnInst *Create(Value *CleanupPad,
5220 BasicBlock *UnwindBB = nullptr,
5221 Instruction *InsertBefore = nullptr) {
5222 assert(CleanupPad);
5223 unsigned Values = 1;
5224 if (UnwindBB)
5225 ++Values;
5226 return new (Values)
5227 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
5228 }
5229
5230 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
5231 BasicBlock *InsertAtEnd) {
5232 assert(CleanupPad);
5233 unsigned Values = 1;
5234 if (UnwindBB)
5235 ++Values;
5236 return new (Values)
5237 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
5238 }
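
  // Illustrative sketch only (not part of the API declared here): terminating
  // a cleanup funclet that unwinds to the caller, assuming `CP` is the
  // CleanupPadInst at the start of block `CleanupBB`:
  //
  //   CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr, CleanupBB);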
5239
5240 /// Provide fast operand accessors
5241 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
5242
5243 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
5244 bool unwindsToCaller() const { return !hasUnwindDest(); }
5245
5246 /// Convenience accessor.
5247 CleanupPadInst *getCleanupPad() const {
5248    return cast<CleanupPadInst>(Op<0>());
5249 }
5250 void setCleanupPad(CleanupPadInst *CleanupPad) {
5251 assert(CleanupPad);
5252 Op<0>() = CleanupPad;
5253 }
5254
5255 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
5256
5257 BasicBlock *getUnwindDest() const {
5258    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
5259 }
5260 void setUnwindDest(BasicBlock *NewDest) {
5261 assert(NewDest);
5262 assert(hasUnwindDest());
5263 Op<1>() = NewDest;
5264 }
5265
5266 // Methods for support type inquiry through isa, cast, and dyn_cast:
5267 static bool classof(const Instruction *I) {
5268 return (I->getOpcode() == Instruction::CleanupRet);
5269 }
5270 static bool classof(const Value *V) {
5271    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5272 }
5273
5274private:
5275 BasicBlock *getSuccessor(unsigned Idx) const {
5276 assert(Idx == 0);
5277 return getUnwindDest();
5278 }
5279
5280 void setSuccessor(unsigned Idx, BasicBlock *B) {
5281 assert(Idx == 0);
5282 setUnwindDest(B);
5283 }
5284
5285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
5286 // method so that subclasses cannot accidentally use it.
5287 template <typename Bitfield>
5288 void setSubclassData(typename Bitfield::Type Value) {
5289 Instruction::setSubclassData<Bitfield>(Value);
5290 }
5291};
5292
5293template <>
5294struct OperandTraits<CleanupReturnInst>
5295 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
5296
5297DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
5298
5299//===----------------------------------------------------------------------===//
5300// UnreachableInst Class
5301//===----------------------------------------------------------------------===//
5302
5303//===---------------------------------------------------------------------------
5304/// This instruction has undefined behavior if it is executed. In particular,
5305/// its presence indicates some higher level knowledge that the end of the
5306/// block cannot be reached.
5307///
5308class UnreachableInst : public Instruction {
5309protected:
5310 // Note: Instruction needs to be a friend here to call cloneImpl.
5311 friend class Instruction;
5312
5313 UnreachableInst *cloneImpl() const;
5314
5315public:
5316 explicit UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore);
5317 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
5318 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
5319
5320 // allocate space for exactly zero operands
5321  void *operator new(size_t S) { return User::operator new(S, 0); }
5322  void operator delete(void *Ptr) { User::operator delete(Ptr); }
5323
5324 unsigned getNumSuccessors() const { return 0; }
5325
5326 // Methods for support type inquiry through isa, cast, and dyn_cast:
5327 static bool classof(const Instruction *I) {
5328 return I->getOpcode() == Instruction::Unreachable;
5329 }
5330 static bool classof(const Value *V) {
5331    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5332 }
5333
5334private:
5335 BasicBlock *getSuccessor(unsigned idx) const {
5336 llvm_unreachable("UnreachableInst has no successors!");
5337 }
5338
5339 void setSuccessor(unsigned idx, BasicBlock *B) {
5340 llvm_unreachable("UnreachableInst has no successors!");
5341 }
5342};
5343
5344//===----------------------------------------------------------------------===//
5345// TruncInst Class
5346//===----------------------------------------------------------------------===//
5347
5348/// This class represents a truncation of integer types.
5349class TruncInst : public CastInst {
5350protected:
5351 // Note: Instruction needs to be a friend here to call cloneImpl.
5352 friend class Instruction;
5353
5354 /// Clone an identical TruncInst
5355 TruncInst *cloneImpl() const;
5356
5357public:
5358 enum { AnyWrap = 0, NoUnsignedWrap = (1 << 0), NoSignedWrap = (1 << 1) };
5359
5360 /// Constructor with insert-before-instruction semantics
5361 TruncInst(
5362 Value *S, ///< The value to be truncated
5363 Type *Ty, ///< The (smaller) type to truncate to
5364 const Twine &NameStr, ///< A name for the new instruction
5365 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5366 );
5367
5368 /// Constructor with insert-before-instruction semantics
5369 TruncInst(
5370 Value *S, ///< The value to be truncated
5371 Type *Ty, ///< The (smaller) type to truncate to
5372 const Twine &NameStr = "", ///< A name for the new instruction
5373 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5374 );
5375
5376 /// Constructor with insert-at-end-of-block semantics
5377 TruncInst(
5378 Value *S, ///< The value to be truncated
5379 Type *Ty, ///< The (smaller) type to truncate to
5380 const Twine &NameStr, ///< A name for the new instruction
5381 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5382 );
5383
5384 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5385 static bool classof(const Instruction *I) {
5386 return I->getOpcode() == Trunc;
5387 }
5388 static bool classof(const Value *V) {
5389    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5390 }
5391
5392 void setHasNoUnsignedWrap(bool B) {
5393 SubclassOptionalData =
5394 (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
5395 }
5396 void setHasNoSignedWrap(bool B) {
5397 SubclassOptionalData =
5398 (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
5399 }
5400
5401 /// Test whether this operation is known to never
5402 /// undergo unsigned overflow, aka the nuw property.
5403 bool hasNoUnsignedWrap() const {
5404 return SubclassOptionalData & NoUnsignedWrap;
5405 }
5406
5407 /// Test whether this operation is known to never
5408 /// undergo signed overflow, aka the nsw property.
5409 bool hasNoSignedWrap() const {
5410 return (SubclassOptionalData & NoSignedWrap) != 0;
5411 }
5412
5413 /// Returns the no-wrap kind of the operation.
5414 unsigned getNoWrapKind() const {
5415 unsigned NoWrapKind = 0;
5416 if (hasNoUnsignedWrap())
5417 NoWrapKind |= NoUnsignedWrap;
5418
5419 if (hasNoSignedWrap())
5420 NoWrapKind |= NoSignedWrap;
5421
5422 return NoWrapKind;
5423 }
5424};
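
// Illustrative sketch only (not part of the API declared here): creating a
// trunc and marking it as non-wrapping, assuming `Wide` is an i64 Value,
// `Int32Ty` is Type::getInt32Ty(Ctx) and `InsertPt` is the instruction to
// insert before:
//
//   auto *T = new TruncInst(Wide, Int32Ty, "narrow", InsertPt);
//   T->setHasNoUnsignedWrap(true);
//   T->setHasNoSignedWrap(true);
//   unsigned Kind = T->getNoWrapKind(); // == NoUnsignedWrap | NoSignedWrap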
5425
5426//===----------------------------------------------------------------------===//
5427// ZExtInst Class
5428//===----------------------------------------------------------------------===//
5429
5430/// This class represents zero extension of integer types.
5431class ZExtInst : public CastInst {
5432protected:
5433 // Note: Instruction needs to be a friend here to call cloneImpl.
5434 friend class Instruction;
5435
5436 /// Clone an identical ZExtInst
5437 ZExtInst *cloneImpl() const;
5438
5439public:
5440 /// Constructor with insert-before-instruction semantics
5441 ZExtInst(
5442 Value *S, ///< The value to be zero extended
5443 Type *Ty, ///< The type to zero extend to
5444 const Twine &NameStr, ///< A name for the new instruction
5445 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5446 );
5447
5448 /// Constructor with insert-before-instruction semantics
5449 ZExtInst(
5450 Value *S, ///< The value to be zero extended
5451 Type *Ty, ///< The type to zero extend to
5452 const Twine &NameStr = "", ///< A name for the new instruction
5453 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5454 );
5455
5456 /// Constructor with insert-at-end semantics.
5457 ZExtInst(
5458 Value *S, ///< The value to be zero extended
5459 Type *Ty, ///< The type to zero extend to
5460 const Twine &NameStr, ///< A name for the new instruction
5461 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5462 );
5463
5464 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5465 static bool classof(const Instruction *I) {
5466 return I->getOpcode() == ZExt;
5467 }
5468 static bool classof(const Value *V) {
5469    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5470 }
5471};
5472
5473//===----------------------------------------------------------------------===//
5474// SExtInst Class
5475//===----------------------------------------------------------------------===//
5476
5477/// This class represents a sign extension of integer types.
5478class SExtInst : public CastInst {
5479protected:
5480 // Note: Instruction needs to be a friend here to call cloneImpl.
5481 friend class Instruction;
5482
5483 /// Clone an identical SExtInst
5484 SExtInst *cloneImpl() const;
5485
5486public:
5487 /// Constructor with insert-before-instruction semantics
5488 SExtInst(
5489 Value *S, ///< The value to be sign extended
5490 Type *Ty, ///< The type to sign extend to
5491 const Twine &NameStr, ///< A name for the new instruction
5492 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5493 );
5494
5495 /// Constructor with insert-before-instruction semantics
5496 SExtInst(
5497 Value *S, ///< The value to be sign extended
5498 Type *Ty, ///< The type to sign extend to
5499 const Twine &NameStr = "", ///< A name for the new instruction
5500 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5501 );
5502
5503 /// Constructor with insert-at-end-of-block semantics
5504 SExtInst(
5505 Value *S, ///< The value to be sign extended
5506 Type *Ty, ///< The type to sign extend to
5507 const Twine &NameStr, ///< A name for the new instruction
5508 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5509 );
5510
5511 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5512 static bool classof(const Instruction *I) {
5513 return I->getOpcode() == SExt;
5514 }
5515 static bool classof(const Value *V) {
5516    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5517 }
5518};
5519
5520//===----------------------------------------------------------------------===//
5521// FPTruncInst Class
5522//===----------------------------------------------------------------------===//
5523
5524/// This class represents a truncation of floating point types.
5525class FPTruncInst : public CastInst {
5526protected:
5527 // Note: Instruction needs to be a friend here to call cloneImpl.
5528 friend class Instruction;
5529
5530 /// Clone an identical FPTruncInst
5531 FPTruncInst *cloneImpl() const;
5532
5533public:
5534 /// Constructor with insert-before-instruction semantics
5535 FPTruncInst(
5536 Value *S, ///< The value to be truncated
5537 Type *Ty, ///< The type to truncate to
5538 const Twine &NameStr, ///< A name for the new instruction
5539 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5540 );
5541
5542 /// Constructor with insert-before-instruction semantics
5543 FPTruncInst(
5544 Value *S, ///< The value to be truncated
5545 Type *Ty, ///< The type to truncate to
5546 const Twine &NameStr = "", ///< A name for the new instruction
5547 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5548 );
5549
5550  /// Constructor with insert-at-end-of-block semantics
5551 FPTruncInst(
5552 Value *S, ///< The value to be truncated
5553 Type *Ty, ///< The type to truncate to
5554 const Twine &NameStr, ///< A name for the new instruction
5555 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5556 );
5557
5558 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5559 static bool classof(const Instruction *I) {
5560 return I->getOpcode() == FPTrunc;
5561 }
5562 static bool classof(const Value *V) {
5563    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5564 }
5565};
5566
5567//===----------------------------------------------------------------------===//
5568// FPExtInst Class
5569//===----------------------------------------------------------------------===//
5570
5571/// This class represents an extension of floating point types.
5572class FPExtInst : public CastInst {
5573protected:
5574 // Note: Instruction needs to be a friend here to call cloneImpl.
5575 friend class Instruction;
5576
5577 /// Clone an identical FPExtInst
5578 FPExtInst *cloneImpl() const;
5579
5580public:
5581 /// Constructor with insert-before-instruction semantics
5582 FPExtInst(
5583 Value *S, ///< The value to be extended
5584 Type *Ty, ///< The type to extend to
5585 const Twine &NameStr, ///< A name for the new instruction
5586 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5587 );
5588
5589 /// Constructor with insert-before-instruction semantics
5590 FPExtInst(
5591 Value *S, ///< The value to be extended
5592 Type *Ty, ///< The type to extend to
5593 const Twine &NameStr = "", ///< A name for the new instruction
5594 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5595 );
5596
5597 /// Constructor with insert-at-end-of-block semantics
5598 FPExtInst(
5599 Value *S, ///< The value to be extended
5600 Type *Ty, ///< The type to extend to
5601 const Twine &NameStr, ///< A name for the new instruction
5602 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5603 );
5604
5605 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5606 static bool classof(const Instruction *I) {
5607 return I->getOpcode() == FPExt;
5608 }
5609 static bool classof(const Value *V) {
5610    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5611 }
5612};
5613
5614//===----------------------------------------------------------------------===//
5615// UIToFPInst Class
5616//===----------------------------------------------------------------------===//
5617
5618/// This class represents a cast unsigned integer to floating point.
5619class UIToFPInst : public CastInst {
5620protected:
5621 // Note: Instruction needs to be a friend here to call cloneImpl.
5622 friend class Instruction;
5623
5624 /// Clone an identical UIToFPInst
5625 UIToFPInst *cloneImpl() const;
5626
5627public:
5628 /// Constructor with insert-before-instruction semantics
5629 UIToFPInst(
5630 Value *S, ///< The value to be converted
5631 Type *Ty, ///< The type to convert to
5632 const Twine &NameStr, ///< A name for the new instruction
5633 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5634 );
5635
5636 /// Constructor with insert-before-instruction semantics
5637 UIToFPInst(
5638 Value *S, ///< The value to be converted
5639 Type *Ty, ///< The type to convert to
5640 const Twine &NameStr = "", ///< A name for the new instruction
5641 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5642 );
5643
5644 /// Constructor with insert-at-end-of-block semantics
5645 UIToFPInst(
5646 Value *S, ///< The value to be converted
5647 Type *Ty, ///< The type to convert to
5648 const Twine &NameStr, ///< A name for the new instruction
5649 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5650 );
5651
5652 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5653 static bool classof(const Instruction *I) {
5654 return I->getOpcode() == UIToFP;
5655 }
5656 static bool classof(const Value *V) {
5657    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5658 }
5659};
5660
5661//===----------------------------------------------------------------------===//
5662// SIToFPInst Class
5663//===----------------------------------------------------------------------===//
5664
5665/// This class represents a cast from signed integer to floating point.
5666class SIToFPInst : public CastInst {
5667protected:
5668 // Note: Instruction needs to be a friend here to call cloneImpl.
5669 friend class Instruction;
5670
5671 /// Clone an identical SIToFPInst
5672 SIToFPInst *cloneImpl() const;
5673
5674public:
5675 /// Constructor with insert-before-instruction semantics
5676 SIToFPInst(
5677 Value *S, ///< The value to be converted
5678 Type *Ty, ///< The type to convert to
5679 const Twine &NameStr, ///< A name for the new instruction
5680 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5681 );
5682
5683 /// Constructor with insert-before-instruction semantics
5684 SIToFPInst(
5685 Value *S, ///< The value to be converted
5686 Type *Ty, ///< The type to convert to
5687 const Twine &NameStr = "", ///< A name for the new instruction
5688 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5689 );
5690
5691 /// Constructor with insert-at-end-of-block semantics
5692 SIToFPInst(
5693 Value *S, ///< The value to be converted
5694 Type *Ty, ///< The type to convert to
5695 const Twine &NameStr, ///< A name for the new instruction
5696 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5697 );
5698
5699 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5700 static bool classof(const Instruction *I) {
5701 return I->getOpcode() == SIToFP;
5702 }
5703 static bool classof(const Value *V) {
5704    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5705 }
5706};
5707
5708//===----------------------------------------------------------------------===//
5709// FPToUIInst Class
5710//===----------------------------------------------------------------------===//
5711
5712/// This class represents a cast from floating point to unsigned integer
5713class FPToUIInst : public CastInst {
5714protected:
5715 // Note: Instruction needs to be a friend here to call cloneImpl.
5716 friend class Instruction;
5717
5718 /// Clone an identical FPToUIInst
5719 FPToUIInst *cloneImpl() const;
5720
5721public:
5722 /// Constructor with insert-before-instruction semantics
5723 FPToUIInst(
5724 Value *S, ///< The value to be converted
5725 Type *Ty, ///< The type to convert to
5726 const Twine &NameStr, ///< A name for the new instruction
5727 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5728 );
5729
5730 /// Constructor with insert-before-instruction semantics
5731 FPToUIInst(
5732 Value *S, ///< The value to be converted
5733 Type *Ty, ///< The type to convert to
5734 const Twine &NameStr = "", ///< A name for the new instruction
5735 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5736 );
5737
5738 /// Constructor with insert-at-end-of-block semantics
5739 FPToUIInst(
5740 Value *S, ///< The value to be converted
5741 Type *Ty, ///< The type to convert to
5742 const Twine &NameStr, ///< A name for the new instruction
5743 BasicBlock *InsertAtEnd ///< Where to insert the new instruction
5744 );
5745
5746 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5747 static bool classof(const Instruction *I) {
5748 return I->getOpcode() == FPToUI;
5749 }
5750 static bool classof(const Value *V) {
5751    return isa<Instruction>(V) && classof(cast<Instruction>(V));
5752 }
5753};
5754
5755//===----------------------------------------------------------------------===//
5756// FPToSIInst Class
5757//===----------------------------------------------------------------------===//
5758
5759/// This class represents a cast from floating point to signed integer.
5760class FPToSIInst : public CastInst {
5761protected:
5762 // Note: Instruction needs to be a friend here to call cloneImpl.
5763 friend class Instruction;
5764
5765 /// Clone an identical FPToSIInst
5766 FPToSIInst *cloneImpl() const;
5767
5768public:
5769 /// Constructor with insert-before-instruction semantics
5770 FPToSIInst(
5771 Value *S, ///< The value to be converted
5772 Type *Ty, ///< The type to convert to
5773 const Twine &NameStr, ///< A name for the new instruction
5774 BasicBlock::iterator InsertBefore ///< Where to insert the new instruction
5775 );
5776
5777 /// Constructor with insert-before-instruction semantics
5778 FPToSIInst(
5779 Value *S, ///< The value to be converted
5780 Type *Ty, ///< The type to convert to
5781 const Twine &NameStr = "", ///< A name for the new instruction
5782 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5783 );
5784
5785 /// Constructor with insert-at-end-of-block semantics
5786 FPToSIInst(
5787 Value *S, ///< The value to be converted
5788 Type *Ty, ///< The type to convert to
5789 const Twine &NameStr, ///< A name for the new instruction
5790 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5791 );
5792
5793 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5794 static bool classof(const Instruction *I) {
5795 return I->getOpcode() == FPToSI;
5796 }
5797 static bool classof(const Value *V) {
5798 return isa<Instruction>(Val: V) && classof(I: cast<Instruction>(Val: V));
5799 }
5800};
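
// Illustrative sketch (not part of the original header): the two FP-to-integer
// casts are normally created through IRBuilder. `Builder` and the
// floating-point value `F` are assumed to exist in the caller's context, and
// the builder may constant-fold, so the isa<> checks only hold when actual
// instructions were emitted.
//
//   Value *U = Builder.CreateFPToUI(F, Builder.getInt32Ty()); // fptoui
//   Value *S = Builder.CreateFPToSI(F, Builder.getInt32Ty()); // fptosi
//   bool BothInsts = isa<FPToUIInst>(U) && isa<FPToSIInst>(S);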

//===----------------------------------------------------------------------===//
// IntToPtrInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from an integer to a pointer.
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
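
// Illustrative sketch (assumed `Builder`, LLVMContext `Ctx`, and integer value
// `IntVal`): an inttoptr built via IRBuilder, then queried for the address
// space of the resulting pointer type.
//
//   PointerType *PtrTy = PointerType::get(Ctx, /*AddressSpace=*/1);
//   Value *P = Builder.CreateIntToPtr(IntVal, PtrTy);
//   if (auto *ITP = dyn_cast<IntToPtrInst>(P))
//     assert(ITP->getAddressSpace() == 1);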

//===----------------------------------------------------------------------===//
// PtrToIntInst Class
//===----------------------------------------------------------------------===//

/// This class represents a cast from a pointer to an integer.
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                           ///< The value to be converted
    Type *Ty,                           ///< The type to convert to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  /// Gets the pointer operand.
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
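
// Illustrative sketch (assumed `Builder` and pointer value `PtrVal`): a
// ptrtoint built via IRBuilder; the instruction exposes its pointer operand
// and that operand's address space.
//
//   Value *AsInt = Builder.CreatePtrToInt(PtrVal, Builder.getInt64Ty());
//   if (auto *PTI = dyn_cast<PtrToIntInst>(AsInt)) {
//     Value *Ptr = PTI->getPointerOperand();
//     unsigned AS = PTI->getPointerAddressSpace();
//   }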

//===----------------------------------------------------------------------===//
// BitCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a no-op cast from one type to another.
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
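
// Illustrative sketch (assumed `Builder` and a <4 x i32> value `Vec`): a
// bitcast only reinterprets the bits, e.g. between a vector and an integer of
// the same width.
//
//   Value *AsInt128 = Builder.CreateBitCast(Vec, Builder.getInt128Ty());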

//===----------------------------------------------------------------------===//
// AddrSpaceCastInst Class
//===----------------------------------------------------------------------===//

/// This class represents a conversion between pointers from one address space
/// to another.
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock::iterator InsertBefore   ///< Where to insert the new instruction
  );

  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr = "",          ///< A name for the new instruction
    Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                           ///< The value to be cast
    Type *Ty,                           ///< The type to cast to
    const Twine &NameStr,               ///< A name for the new instruction
    BasicBlock *InsertAtEnd             ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand.
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand.
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
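
// Illustrative sketch (assumed `Builder`, LLVMContext `Ctx`, and a pointer
// `GlobalPtr` in address space 1): an addrspacecast into address space 0,
// with the source and destination address spaces read back off the
// instruction.
//
//   Type *GenericPtrTy = PointerType::get(Ctx, /*AddressSpace=*/0);
//   Value *Generic = Builder.CreateAddrSpaceCast(GlobalPtr, GenericPtrTy);
//   if (auto *ASC = dyn_cast<AddrSpaceCastInst>(Generic)) {
//     unsigned Src = ASC->getSrcAddressSpace();  // 1
//     unsigned Dst = ASC->getDestAddressSpace(); // 0
//   }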

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

/// A helper function that returns the pointer operand of a load or store
/// instruction. Returns nullptr if the value is neither a load nor a store.
inline const Value *getLoadStorePointerOperand(const Value *V) {
  if (auto *Load = dyn_cast<LoadInst>(V))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(V))
    return Store->getPointerOperand();
  return nullptr;
}
inline Value *getLoadStorePointerOperand(Value *V) {
  return const_cast<Value *>(
      getLoadStorePointerOperand(static_cast<const Value *>(V)));
}
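
// Illustrative sketch (assumed instruction pointer `I`): the helper folds the
// separate LoadInst/StoreInst cases into one query when only the accessed
// address matters.
//
//   if (Value *Addr = getLoadStorePointerOperand(I)) {
//     // `I` is a load or store and `Addr` is the pointer it accesses.
//   }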

/// A helper function that returns the pointer operand of a load, store
/// or GEP instruction. Returns nullptr if the value is not a load, store,
/// or GEP.
inline const Value *getPointerOperand(const Value *V) {
  if (auto *Ptr = getLoadStorePointerOperand(V))
    return Ptr;
  if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
    return Gep->getPointerOperand();
  return nullptr;
}
inline Value *getPointerOperand(Value *V) {
  return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
}

/// A helper function that returns the alignment of a load or store
/// instruction.
inline Align getLoadStoreAlignment(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getAlign();
  return cast<StoreInst>(I)->getAlign();
}

/// A helper function that returns the address space of the pointer operand of
/// a load or store instruction.
inline unsigned getLoadStoreAddressSpace(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerAddressSpace();
  return cast<StoreInst>(I)->getPointerAddressSpace();
}

/// A helper function that returns the type of a load or store instruction.
inline Type *getLoadStoreType(Value *I) {
  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
         "Expected Load or Store instruction");
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getType();
  return cast<StoreInst>(I)->getValueOperand()->getType();
}
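
// Illustrative sketch (assumed instruction pointer `I`): the three accessors
// above share the same load-or-store precondition, so they are typically
// guarded by a single check.
//
//   if (isa<LoadInst>(I) || isa<StoreInst>(I)) {
//     Align A = getLoadStoreAlignment(I);
//     unsigned AS = getLoadStoreAddressSpace(I);
//     Type *AccessTy = getLoadStoreType(I);
//   }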

/// A helper function that returns an atomic operation's sync scope; returns
/// std::nullopt if it is not an atomic operation.
inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
  if (!I->isAtomic())
    return std::nullopt;
  if (auto *AI = dyn_cast<LoadInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<StoreInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<FenceInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
    return AI->getSyncScopeID();
  if (auto *AI = dyn_cast<AtomicRMWInst>(I))
    return AI->getSyncScopeID();
  llvm_unreachable("unhandled atomic operation");
}
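
// Illustrative sketch (assumed instruction pointer `I`): non-atomic
// instructions yield std::nullopt, so the result can be tested and
// dereferenced directly.
//
//   if (std::optional<SyncScope::ID> SSID = getAtomicSyncScopeID(I)) {
//     bool SingleThread = (*SSID == SyncScope::SingleThread);
//   }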

//===----------------------------------------------------------------------===//
// FreezeInst Class
//===----------------------------------------------------------------------===//

/// This class represents a freeze instruction, which returns an arbitrary,
/// but fixed, concrete value if its operand is either a poison value or an
/// undef value.
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst
  FreezeInst *cloneImpl() const;

public:
  explicit FreezeInst(Value *S, const Twine &NameStr,
                      BasicBlock::iterator InsertBefore);
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
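
// Illustrative sketch (assumed `Builder` and a possibly-poison value `X`):
// freezing pins the operand to some fixed concrete value so that poison/undef
// does not propagate through later uses.
//
//   Value *Frozen = Builder.CreateFreeze(X, "x.fr");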

} // end namespace llvm

#endif // LLVM_IR_INSTRUCTIONS_H