//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Operator;
class AddOperator;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class WithOverflowInst;
struct KnownBits;
struct KnownFPClass;
class Loop;
class LoopInfo;
class MDNode;
class StringRef;
class TargetLibraryInfo;
template <typename T> class ArrayRef;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// The max limit of the search depth in DecomposeGEPExpression() and
/// getUnderlyingObject().
constexpr unsigned MaxLookupSearchDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and
/// the bit is set only if it is true for all of the elements in the vector.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known,
                               const DataLayout &DL,
                               AssumptionCache *AC = nullptr,
                               const Instruction *CxtI = nullptr,
                               const DominatorTree *DT = nullptr,
                               bool UseInstrInfo = true, unsigned Depth = 0);
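
// A minimal usage sketch (editor's illustration, not part of the upstream
// header); it assumes a `Value *V` and the module's `DataLayout DL` are in
// scope. KnownBits can prove alignment-style facts such as divisibility:
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.countMinTrailingZeros() >= 2) {
//     // The low two bits are known zero, so V is a multiple of 4.
//   }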

/// Returns the known bits rather than passing by reference.
LLVM_ABI KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                                    AssumptionCache *AC = nullptr,
                                    const Instruction *CxtI = nullptr,
                                    const DominatorTree *DT = nullptr,
                                    bool UseInstrInfo = true,
                                    unsigned Depth = 0);

/// Returns the known bits rather than passing by reference.
LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                    const DataLayout &DL,
                                    AssumptionCache *AC = nullptr,
                                    const Instruction *CxtI = nullptr,
                                    const DominatorTree *DT = nullptr,
                                    bool UseInstrInfo = true,
                                    unsigned Depth = 0);

LLVM_ABI KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                    const SimplifyQuery &Q, unsigned Depth = 0);

LLVM_ABI KnownBits computeKnownBits(const Value *V, const SimplifyQuery &Q,
                                    unsigned Depth = 0);

LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known,
                               const SimplifyQuery &Q, unsigned Depth = 0);

/// Compute known bits from the range metadata.
/// \p Ranges the !range metadata to analyze
/// \p Known the resulting known zero/one bits
LLVM_ABI void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                                KnownBits &Known);

/// Merge bits known from context-dependent facts into Known.
LLVM_ABI void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                          const SimplifyQuery &Q,
                                          unsigned Depth = 0);

/// Compute the known bits of a logic op (and/xor/or) from the KnownBits of
/// its two operands.
LLVM_ABI KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                                const KnownBits &KnownLHS,
                                                const KnownBits &KnownRHS,
                                                const SimplifyQuery &SQ,
                                                unsigned Depth = 0);

/// Adjust \p Known for the given select \p Arm to include information from the
/// select \p Cond.
LLVM_ABI void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
                                          Value *Arm, bool Invert,
                                          const SimplifyQuery &Q,
                                          unsigned Depth = 0);

/// Return true if LHS and RHS have no common bits set.
LLVM_ABI bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                                  const WithCache<const Value *> &RHSCache,
                                  const SimplifyQuery &SQ);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                     bool OrZero = false,
                                     AssumptionCache *AC = nullptr,
                                     const Instruction *CxtI = nullptr,
                                     const DominatorTree *DT = nullptr,
                                     bool UseInstrInfo = true,
                                     unsigned Depth = 0);

LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero,
                                     const SimplifyQuery &Q,
                                     unsigned Depth = 0);
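
// Sketch (editor's illustration; assumes `Shl` is a `Value *` defined as
// `shl i32 1, %n` and `DL` is in scope). A left shift of 1 yields a power of
// two unless the one bit is shifted out, so the OrZero form typically
// succeeds where the strict form needs an overflow proof:
//
//   bool StrictPow2 = isKnownToBeAPowerOfTwo(Shl, DL, /*OrZero=*/false);
//   bool Pow2OrZero = isKnownToBeAPowerOfTwo(Shl, DL, /*OrZero=*/true);
//   // Pow2OrZero can be true here; StrictPow2 additionally requires proving
//   // that the shift cannot wrap to zero.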

LLVM_ABI bool isOnlyUsedInZeroComparison(const Instruction *CxtI);

LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q,
                             unsigned Depth = 0);
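
// Minimal usage sketch (editor's illustration; `V` and `DL` are assumed to be
// in scope). SimplifyQuery bundles the analysis context:
//
//   SimplifyQuery Q(DL);
//   if (isKnownNonZero(V, Q)) {
//     // A division or null check on V can be simplified.
//   }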

/// Return true if the two given values are negations of each other.
/// Currently can recognize Value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y,
                              bool NeedNSW = false, bool AllowPoison = true);

/// Return true iff:
/// 1. X is poison implies Y is poison.
/// 2. X is true implies Y is false.
/// 3. X is false implies Y is true.
/// Otherwise, return false.
LLVM_ABI bool isKnownInversion(const Value *X, const Value *Y);

/// Returns true if the given value is known to be non-negative.
LLVM_ABI bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                                 unsigned Depth = 0);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
LLVM_ABI bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                              unsigned Depth = 0);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
LLVM_ABI bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                              unsigned Depth = 0);

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2,
                              const SimplifyQuery &SQ, unsigned Depth = 0);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                                const SimplifyQuery &SQ, unsigned Depth = 0);
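
// Sketch (editor's illustration; assumes `V` is a 32-bit integer `Value *` and
// `DL` is in scope). Checking whether the low three bits are provably zero:
//
//   SimplifyQuery Q(DL);
//   APInt LowBits = APInt::getLowBitsSet(32, 3);
//   if (MaskedValueIsZero(V, LowBits, Q)) {
//     // V is a multiple of 8.
//   }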

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                                     AssumptionCache *AC = nullptr,
                                     const Instruction *CxtI = nullptr,
                                     const DominatorTree *DT = nullptr,
                                     bool UseInstrInfo = true,
                                     unsigned Depth = 0);
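
// Worked sketch of the "ashr" case above (editor's illustration; `Shr` is
// assumed to be a `Value *` defined as `ashr i32 %x, 2`, with `DL` in scope):
//
//   unsigned SignBits = ComputeNumSignBits(Shr, DL);
//   // SignBits >= 3: the arithmetic shift replicated the sign bit twice,
//   // so the top three bits of the result are identical.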

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op,
                                            const DataLayout &DL,
                                            AssumptionCache *AC = nullptr,
                                            const Instruction *CxtI = nullptr,
                                            const DominatorTree *DT = nullptr,
                                            unsigned Depth = 0);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
LLVM_ABI Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                               const TargetLibraryInfo *TLI);

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                             bool &TrueIfSigned);
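
// Sketch (editor's illustration): `icmp slt X, 0` on an i32 checks exactly the
// sign bit of X and is true when X is negative:
//
//   bool TrueIfSigned;
//   if (isSignBitCheck(ICmpInst::ICMP_SLT, APInt::getZero(32), TrueIfSigned)) {
//     // TrueIfSigned == true: the compare is true iff the sign bit is set.
//   }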

/// Determine which floating-point classes are valid for \p V, and return them
/// in KnownFPClass bit sets.
///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile time optimization hint for which floating
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
                                          const APInt &DemandedElts,
                                          FPClassTest InterestedClasses,
                                          const SimplifyQuery &SQ,
                                          unsigned Depth = 0);

LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V,
                                          FPClassTest InterestedClasses,
                                          const SimplifyQuery &SQ,
                                          unsigned Depth = 0);

LLVM_ABI KnownFPClass computeKnownFPClass(
    const Value *V, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true, unsigned Depth = 0);
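
// Sketch (editor's illustration; `V` and `DL` assumed in scope). Restricting
// the query to the classes of interest keeps the analysis cheap:
//
//   KnownFPClass KnownFP = computeKnownFPClass(V, DL, fcNan | fcInf);
//   if (KnownFP.isKnownNeverNaN() && KnownFP.isKnownNeverInfinity()) {
//     // V is finite, so e.g. ordered/unordered compares on V coincide.
//   }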

/// Wrapper to account for known fast math flags at the use instruction.
LLVM_ABI KnownFPClass computeKnownFPClass(
    const Value *V, const APInt &DemandedElts, FastMathFlags FMF,
    FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth = 0);

LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                          FPClassTest InterestedClasses,
                                          const SimplifyQuery &SQ,
                                          unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ,
                                   unsigned Depth = 0);

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
LLVM_ABI bool cannotBeOrderedLessThanZero(const Value *V,
                                          const SimplifyQuery &SQ,
                                          unsigned Depth = 0);

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
LLVM_ABI bool isKnownNeverInfinity(const Value *V, const SimplifyQuery &SQ,
                                   unsigned Depth = 0);

/// Return true if the floating-point value can never contain a NaN or infinity.
LLVM_ABI bool isKnownNeverInfOrNaN(const Value *V, const SimplifyQuery &SQ,
                                   unsigned Depth = 0);

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ,
                              unsigned Depth = 0);

/// Return false if we can prove that the specified FP value's sign bit is 0.
/// Return true if we can prove that the specified FP value's sign bit is 1.
/// Otherwise return std::nullopt.
LLVM_ABI std::optional<bool> computeKnownFPSignBit(const Value *V,
                                                   const SimplifyQuery &SQ,
                                                   unsigned Depth = 0);

/// Return true if the sign bit of the FP value can be ignored by the user when
/// the value is zero.
bool canIgnoreSignBitOfZero(const Use &U);

/// Return true if the sign bit of the FP value can be ignored by the user when
/// the value is NaN.
bool canIgnoreSignBitOfNaN(const Use &U);

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
LLVM_ABI Value *isBytewiseValue(Value *V, const DataLayout &DL);
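
// Sketch (editor's illustration; assumes `V` and `DL` in scope). A memset-style
// store of an i32 can be formed only when all four bytes repeat:
//
//   if (Value *ByteVal = isBytewiseValue(V, DL)) {
//     // ByteVal is the repeated i8 (or undef); a 4-byte store of V can be
//     // lowered to a memset with this byte.
//   }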

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not empty, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
LLVM_ABI Value *FindInsertedValue(
    Value *V, ArrayRef<unsigned> idx_range,
    std::optional<BasicBlock::iterator> InsertBefore = std::nullopt);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}
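
// Usage sketch (editor's illustration; `Ptr` is assumed to be a pointer-typed
// `Value *`, e.g. a GEP with constant indices, and `DL` is in scope):
//
//   int64_t Offset = 0;
//   Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
//   // For `getelementptr i8, ptr %p, i64 16`, Base is %p and Offset is 16.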

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
LLVM_ABI bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                          unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
LLVM_ABI bool getConstantDataArrayInfo(const Value *V,
                                       ConstantDataArraySlice &Slice,
                                       unsigned ElementSize,
                                       uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
LLVM_ABI bool getConstantStringInfo(const Value *V, StringRef &Str,
                                    bool TrimAtNul = true);

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
LLVM_ABI uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same by aliasing rules. You CAN'T use it to replace one value with another.
/// If \p MustPreserveNullness is true, the call must preserve the nullness of
/// the pointer.
LLVM_ABI const Value *
getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                     bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered a capture. The arguments are not marked as returned either,
/// because that would make the attribute useless. If \p MustPreserveNullness
/// is true, the intrinsic must preserve the nullness of the pointer.
LLVM_ABI bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments, pointer casts
/// or `llvm.threadlocal.address` from the specified value \p V, returning the
/// original object being addressed. Note that the returned value has pointer
/// type if the specified value does. If the \p MaxLookup value is non-zero, it
/// limits the number of instructions to be stripped off.
LLVM_ABI const Value *
getUnderlyingObject(const Value *V, unsigned MaxLookup = MaxLookupSearchDepth);
inline Value *getUnderlyingObject(Value *V,
                                  unsigned MaxLookup = MaxLookupSearchDepth) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
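
// Sketch (editor's illustration; `Ptr` assumed in scope). Walking back through
// casts and GEPs to the allocation:
//
//   const Value *Obj = getUnderlyingObject(Ptr);
//   if (isa<AllocaInst>(Obj)) {
//     // Ptr is derived from a stack allocation.
//   }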

/// Like getUnderlyingObject(), but will try harder to find a single underlying
/// object. In particular, this function also looks through selects and phis.
LLVM_ABI const Value *getUnderlyingObjectAggressive(const Value *V);

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///       A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///       Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object,
/// and thus it shouldn't look through the phi above.
LLVM_ABI void getUnderlyingObjects(const Value *V,
                                   SmallVectorImpl<const Value *> &Objects,
                                   const LoopInfo *LI = nullptr,
                                   unsigned MaxLookup = MaxLookupSearchDepth);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
LLVM_ABI bool getUnderlyingObjectsForCodeGen(const Value *V,
                                             SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
LLVM_ABI AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
LLVM_ABI bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
LLVM_ABI bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if the instruction doesn't potentially cross vector lanes. This
/// condition is weaker than checking that the instruction is lanewise: lanewise
/// means that the same operation is splatted across all lanes, but we also
/// include the case where there is a different operation on each lane, as long
/// as the operation only uses data from that lane. An example of an operation
/// that is not lanewise, but doesn't cross vector lanes, is insertelement.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI. If the instruction has (transitive) operands that don't
/// dominate CtxI, the analysis is performed under the assumption that these
/// operands will also be speculated to a point before CxtI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// If \p UseVariableInfo is true, the information from non-constant operands
/// will be taken into account.
///
/// If \p IgnoreUBImplyingAttrs is true, UB-implying attributes will be ignored.
/// The caller is responsible for correctly propagating them after hoisting.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
LLVM_ABI bool isSafeToSpeculativelyExecute(
    const Instruction *I, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true,
    bool IgnoreUBImplyingAttrs = true);

inline bool isSafeToSpeculativelyExecute(const Instruction *I,
                                         BasicBlock::iterator CtxI,
                                         AssumptionCache *AC = nullptr,
                                         const DominatorTree *DT = nullptr,
                                         const TargetLibraryInfo *TLI = nullptr,
                                         bool UseVariableInfo = true,
                                         bool IgnoreUBImplyingAttrs = true) {
  // Take an iterator, and unwrap it into an Instruction *.
  return isSafeToSpeculativelyExecute(I, &*CtxI, AC, DT, TLI, UseVariableInfo,
                                      IgnoreUBImplyingAttrs);
}
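
// Hoisting sketch (editor's illustration; `I` is an instruction inside a
// conditional block and `InsertPt` a dominating instruction). A typical
// LICM-style guard before moving code out of its block:
//
//   if (isSafeToSpeculativelyExecute(I))
//     I->moveBefore(InsertPt); // Executing I on the new path cannot trap.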

/// Don't use information from its non-constant operands. This helper is used
/// when its operands are going to be replaced.
inline bool isSafeToSpeculativelyExecuteWithVariableReplaced(
    const Instruction *I, bool IgnoreUBImplyingAttrs = true) {
  return isSafeToSpeculativelyExecute(I, nullptr, nullptr, nullptr, nullptr,
                                      /*UseVariableInfo=*/false,
                                      IgnoreUBImplyingAttrs);
}

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
///  * If Opcode has a fixed number of operands (eg, as binary operators do),
///    then Inst has to have at least as many leading operands. The function
///    will ignore all trailing operands beyond that number.
///  * If Opcode allows for an arbitrary number of operands (eg, as CallInsts
///    do), then all operands are considered.
///  * The virtual instruction has to satisfy all typing rules of the provided
///    Opcode.
///  * This function is pessimistic in the following sense: If one actually
///    materialized the virtual instruction, then isSafeToSpeculativelyExecute
///    may say that the materialized instruction is speculatable whereas this
///    function may have said that the instruction wouldn't be speculatable.
///    This behavior is a shortcoming in the current implementation and not
///    intentional.
LLVM_ABI bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr, bool UseVariableInfo = true,
    bool IgnoreUBImplyingAttrs = true);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
///  * Memory dependence arises for example if the instruction reads from
///    memory or may produce effects or undefined behaviour. Memory dependent
///    instructions generally cannot be reordered with respect to other memory
///    dependent instructions.
///  * Control dependence arises for example if the instruction may fault
///    if lifted above a throwing call or infinite loop.
LLVM_ABI bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
LLVM_ABI bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI. By default, ephemeral values of the assumption
/// are treated as an invalid context, to prevent the assumption from being used
/// to optimize away its argument. If the caller can ensure that this won't
/// happen, it can call with AllowEphemerals set to true to get more valid
/// assumptions.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I,
                                      const Instruction *CxtI,
                                      const DominatorTree *DT = nullptr,
                                      bool AllowEphemerals = false);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};

LLVM_ABI OverflowResult computeOverflowForUnsignedMul(const Value *LHS,
                                                      const Value *RHS,
                                                      const SimplifyQuery &SQ,
                                                      bool IsNSW = false);
LLVM_ABI OverflowResult computeOverflowForSignedMul(const Value *LHS,
                                                    const Value *RHS,
                                                    const SimplifyQuery &SQ);
LLVM_ABI OverflowResult computeOverflowForUnsignedAdd(
    const WithCache<const Value *> &LHS, const WithCache<const Value *> &RHS,
    const SimplifyQuery &SQ);
LLVM_ABI OverflowResult computeOverflowForSignedAdd(
    const WithCache<const Value *> &LHS, const WithCache<const Value *> &RHS,
    const SimplifyQuery &SQ);
/// This version also leverages the sign bit of Add if known.
LLVM_ABI OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                                    const SimplifyQuery &SQ);
LLVM_ABI OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
                                                      const Value *RHS,
                                                      const SimplifyQuery &SQ);
LLVM_ABI OverflowResult computeOverflowForSignedSub(const Value *LHS,
                                                    const Value *RHS,
                                                    const SimplifyQuery &SQ);
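
// Sketch (editor's illustration; `A`, `B`, and `DL` assumed in scope): proving
// that an `add` can be marked `nuw`.
//
//   SimplifyQuery Q(DL);
//   if (computeOverflowForUnsignedAdd(A, B, Q) ==
//       OverflowResult::NeverOverflows) {
//     // `add nuw A, B` is a valid strengthening of `add A, B`.
//   }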

/// Returns true if the arithmetic part of \p WO's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
LLVM_ABI bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                        const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                            bool UseInstrInfo = true,
                                            AssumptionCache *AC = nullptr,
                                            const Instruction *CtxI = nullptr,
                                            const DominatorTree *DT = nullptr,
                                            unsigned Depth = 0);

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
LLVM_ABI ConstantRange computeConstantRangeIncludingKnownBits(
    const WithCache<const Value *> &V, bool ForSigned, const SimplifyQuery &SQ);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
LLVM_ABI bool
isGuaranteedToTransferExecutionToSuccessor(BasicBlock::const_iterator Begin,
                                           BasicBlock::const_iterator End,
                                           unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
LLVM_ABI bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
LLVM_ABI bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                     const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOp.
LLVM_ABI bool propagatesPoison(const Use &PoisonOp);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
LLVM_ABI bool mustTriggerUB(const Instruction *I,
                            const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
LLVM_ABI bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
LLVM_ABI bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered. This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
LLVM_ABI bool canCreateUndefOrPoison(const Operator *Op,
                                     bool ConsiderFlagsAndMetadata = true);
LLVM_ABI bool canCreatePoison(const Operator *Op,
                              bool ConsiderFlagsAndMetadata = true);

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
LLVM_ABI bool
isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC = nullptr,
                                 const Instruction *CtxI = nullptr,
                                 const DominatorTree *DT = nullptr,
                                 unsigned Depth = 0);

/// Returns true if V cannot be poison, but may be undef.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V,
                                        AssumptionCache *AC = nullptr,
                                        const Instruction *CtxI = nullptr,
                                        const DominatorTree *DT = nullptr,
                                        unsigned Depth = 0);

inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                      BasicBlock::iterator CtxI,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0) {
  // Takes an iterator as a position, passes down to Instruction *
  // implementation.
  return isGuaranteedNotToBePoison(V, AC, &*CtxI, DT, Depth);
}

/// Returns true if V cannot be undef, but may be poison.
LLVM_ABI bool isGuaranteedNotToBeUndef(const Value *V,
                                       AssumptionCache *AC = nullptr,
                                       const Instruction *CtxI = nullptr,
                                       const DominatorTree *DT = nullptr,
                                       unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result. Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison. This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist. Note that a false result conveys no information.
LLVM_ABI bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                            Instruction *OnPathTo,
                                            DominatorTree *DT);

/// Convert an integer comparison with a constant RHS into an equivalent
/// form with the strictness flipped predicate. Return the new predicate and
/// corresponding constant RHS if possible. Otherwise return std::nullopt.
/// E.g., (icmp sgt X, 0) -> (icmp sge X, 1).
LLVM_ABI std::optional<std::pair<CmpPredicate, Constant *>>
getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
LLVM_ABI SelectPatternResult
matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                   Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}
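
// Sketch (editor's illustration; `Sel` is assumed to be the value
// `select (icmp sgt %a, %b), %a, %b`):
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SPR.Flavor == SPF_SMAX) {
//     // The select computes smax(%a, %b); LHS == %a, RHS == %b.
//   }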

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    FastMathFlags FMF = FastMathFlags(), Instruction::CastOps *CastOp = nullptr,
    unsigned Depth = 0);

/// Determine the pattern for predicate `X Pred Y ? X : Y`.
LLVM_ABI SelectPatternResult getSelectPattern(
    CmpInst::Predicate Pred, SelectPatternNaNBehavior NaNBehavior = SPNB_NA,
    bool Ordered = false);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF,
                                          bool Ordered = false);

/// Convert given `SPF` to equivalent min/max intrinsic.
/// Caller must ensure `SPF` is an integer min or max pattern.
LLVM_ABI Intrinsic::ID getMinMaxIntrinsic(SelectPatternFlavor SPF);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
LLVM_ABI std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant. In math terms, it can
///   be a free variable. We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms. This
///   results in some odd recurrence structures. Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code. (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
LLVM_ABI bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                                    Value *&Start, Value *&Step);

/// Analogous to the above, but starting from the binary operator.
LLVM_ABI bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                    Value *&Start, Value *&Step);
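
// Sketch (editor's illustration; `Phi` is assumed to be the induction phi of
// a loop like `for (i = 0; ...; i += 4)`):
//
//   BinaryOperator *BO;
//   Value *Start, *Step;
//   if (matchSimpleRecurrence(Phi, BO, Start, Step)) {
//     // BO is the `add`, Start is the constant 0, Step is the constant 4.
//   }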

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector
/// of such values. Note that the truth table for implication is the same as
/// <=u on i1 values (but not <=s!). The truth table for both is:
///    | T | F (B)
///  T | T | F
///  F | T | T
/// (A)
LLVM_ABI std::optional<bool>
isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL,
                   bool LHSIsTrue = true, unsigned Depth = 0);
LLVM_ABI std::optional<bool>
isImpliedCondition(const Value *LHS, CmpPredicate RHSPred, const Value *RHSOp0,
                   const Value *RHSOp1, const DataLayout &DL,
                   bool LHSIsTrue = true, unsigned Depth = 0);
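
// Sketch (editor's illustration; `A` is `icmp sgt %x, 10`, `B` is
// `icmp sgt %x, 0`, and `DL` is in scope). x > 10 implies x > 0:
//
//   std::optional<bool> Implied = isImpliedCondition(A, B, DL);
//   if (Implied && *Implied) {
//     // Whenever A is true, B is guaranteed true as well.
//   }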

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
LLVM_ABI std::optional<bool>
isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI,
                        const DataLayout &DL);
LLVM_ABI std::optional<bool>
isImpliedByDomCondition(CmpPredicate Pred, const Value *LHS, const Value *RHS,
                        const Instruction *ContextI, const DataLayout &DL);

/// Call \p InsertAffected on all Values whose known bits / value may be
/// affected by the condition \p Cond. Used by AssumptionCache and
/// DomConditionCache.
LLVM_ABI void
findValuesAffectedByCondition(Value *Cond, bool IsAssume,
                              function_ref<void(Value *)> InsertAffected);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H