//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_VALUETRACKING_H
#define LLVM_ANALYSIS_VALUETRACKING_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/FMF.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>
#include <cstdint>

namespace llvm {

class Operator;
class AddOperator;
class AllocaInst;
class APInt;
class AssumptionCache;
class DominatorTree;
class GEPOperator;
class LoadInst;
class WithOverflowInst;
struct KnownBits;
class Loop;
class LoopInfo;
class MDNode;
class StringRef;
class TargetLibraryInfo;
class Value;

constexpr unsigned MaxAnalysisRecursionDepth = 6;

/// Determine which bits of V are known to be either zero or one and return
/// them in the \p Known bit set.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and a
/// bit is set only if it is true for all of the elements in the vector.
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL,
                      unsigned Depth = 0, AssumptionCache *AC = nullptr,
                      const Instruction *CxtI = nullptr,
                      const DominatorTree *DT = nullptr,
                      bool UseInstrInfo = true);
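
// Example (an illustrative sketch, not part of this interface): a typical
// known-bits query using the value-returning overload declared below, where
// `V` and `DL` stand in for a Value and the module's DataLayout:
//
//   KnownBits Known = computeKnownBits(V, DL);
//   if (Known.isNonNegative())
//     ; // The sign bit of V is known to be zero.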

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                           unsigned Depth = 0, AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           const DataLayout &DL, unsigned Depth = 0,
                           AssumptionCache *AC = nullptr,
                           const Instruction *CxtI = nullptr,
                           const DominatorTree *DT = nullptr,
                           bool UseInstrInfo = true);

KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q);

KnownBits computeKnownBits(const Value *V, unsigned Depth,
                           const SimplifyQuery &Q);

void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                      const SimplifyQuery &Q);

/// Compute known bits from the range metadata.
/// \p Ranges the !range metadata to analyze
/// \p Known the resulting known bits
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known);

/// Merge bits known from context-dependent facts into Known.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                 unsigned Depth, const SimplifyQuery &Q);

/// Using the KnownBits of LHS and RHS, produce the known bits for the logic
/// op (and/xor/or).
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I,
                                       const KnownBits &KnownLHS,
                                       const KnownBits &KnownRHS,
                                       unsigned Depth, const SimplifyQuery &SQ);

/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                         const WithCache<const Value *> &RHSCache,
                         const SimplifyQuery &SQ);

/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
/// of two when defined. Supports values with integer or pointer type and
/// vectors of integers. If 'OrZero' is set, then return true if the given
/// value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                            bool OrZero = false, unsigned Depth = 0,
                            AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
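
// Example (illustrative): given IR such as
//
//   %neg = sub i32 0, %x
//   %p   = and i32 %x, %neg      ; isolates the lowest set bit of %x
//
// a query isKnownToBeAPowerOfTwo(%p, DL, /*OrZero=*/true) can succeed, since
// `x & -x` is always either zero or has exactly one bit set. Exactly which
// patterns are recognized is an implementation detail of the analysis.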

bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth = 0);

/// Return true if the two given values are negations of each other.
/// Currently it can recognize these value pairs:
/// 1: <X, Y> if X = sub (0, Y) or Y = sub (0, X)
/// 2: <X, Y> if X = sub (A, B) and Y = sub (B, A)
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW = false,
                     bool AllowPoison = true);

/// Returns true if the given value is known to be non-negative.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                        unsigned Depth = 0);

/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);

/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                     unsigned Depth = 0);
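
// Example (an illustrative sketch): these predicates compose as their
// definitions suggest; `V` and `SQ` are placeholders for a Value and a
// SimplifyQuery:
//
//   if (isKnownNonNegative(V, SQ) && isKnownNonZero(V, SQ))
//     ; // V is provably positive; isKnownPositive(V, SQ) would also hold.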

/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
                     AssumptionCache *AC = nullptr,
                     const Instruction *CxtI = nullptr,
                     const DominatorTree *DT = nullptr,
                     bool UseInstrInfo = true);

/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
/// cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers. In the case where V is a vector, the mask,
/// known zero, and known one values are the same width as the vector element,
/// and the bit is set only if it is true for all of the elements in the
/// vector.
bool MaskedValueIsZero(const Value *V, const APInt &Mask,
                       const SimplifyQuery &SQ, unsigned Depth = 0);

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
/// bit (itself), but other cases can give us information. For example,
/// immediately after an "ashr X, 2", we know that the top 3 bits are all
/// equal to each other, so we return 3. For vectors, return the number of
/// sign bits for the vector element with the minimum number of known sign
/// bits.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
                            const DominatorTree *DT = nullptr,
                            bool UseInstrInfo = true);
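
// Example (illustrative): for IR such as
//
//   %s = ashr i32 %x, 28
//
// the top 29 bits of %s are all copies of the sign bit (1 sign bit plus 28
// shifted-in copies), so ComputeNumSignBits(%s, DL) returns at least 29.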

/// Get the upper bound on bit size for this Value \p Op as a signed integer.
/// i.e. x == sext(trunc(x to MaxSignificantBits) to bitwidth(x)).
/// Similar to the APInt::getSignificantBits function.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL,
                                   unsigned Depth = 0,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CxtI = nullptr,
                                   const DominatorTree *DT = nullptr);

/// Map a call instruction to an intrinsic ID. Libcalls which have equivalent
/// intrinsics are treated as-if they were intrinsics.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB,
                                      const TargetLibraryInfo *TLI);

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                    bool &TrueIfSigned);
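
// Example (illustrative): `icmp slt i32 %x, 0` tests exactly the sign bit of
// %x, so isSignBitCheck(ICmpInst::ICMP_SLT, APInt(32, 0), TrueIfSigned)
// returns true with TrueIfSigned == true. Likewise `icmp sgt i32 %x, -1` is
// a sign-bit check, with TrueIfSigned == false.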

/// Returns a pair of values, which, if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                Value *RHS,
                                                bool LookThroughSrc = true);
std::pair<Value *, FPClassTest> fcmpToClassTest(CmpInst::Predicate Pred,
                                                const Function &F, Value *LHS,
                                                const APFloat *ConstRHS,
                                                bool LookThroughSrc = true);

/// Compute the possible floating-point classes that \p LHS could be based on
/// fcmp \p Pred \p LHS, \p RHS.
///
/// \returns { TestedValue, ClassesIfTrue, ClassesIfFalse }
///
/// If the compare returns an exact class test, ClassesIfTrue == ~ClassesIfFalse
///
/// This is a less exact version of fcmpToClassTest (e.g. fcmpToClassTest will
/// only succeed for a test of x > 0 implies positive, but not x > 1).
///
/// If \p LookThroughSrc is true, consider the input value when computing the
/// mask. This may look through sign bit operations.
///
/// If \p LookThroughSrc is false, ignore the source value (i.e. the first pair
/// element will always be LHS).
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 Value *RHS, bool LookThroughSrc = true);
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 FPClassTest RHS, bool LookThroughSrc = true);
std::tuple<Value *, FPClassTest, FPClassTest>
fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                 const APFloat &RHS, bool LookThroughSrc = true);
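
// Example (illustrative): for `fcmp olt float %x, 0.0`, the compare is true
// exactly when %x is negative and not NaN (note that -0.0 compares equal to
// 0.0), so fcmpToClassTest can return the pair
// {%x, fcNegInf | fcNegNormal | fcNegSubnormal}.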

struct KnownFPClass {
  /// Floating-point classes the value could be one of.
  FPClassTest KnownFPClasses = fcAllFlags;

  /// std::nullopt if the sign bit is unknown, true if the sign bit is
  /// definitely set or false if the sign bit is definitely unset.
  std::optional<bool> SignBit;

  bool operator==(KnownFPClass Other) const {
    return KnownFPClasses == Other.KnownFPClasses && SignBit == Other.SignBit;
  }

  /// Return true if it's known this can never be one of the mask entries.
  bool isKnownNever(FPClassTest Mask) const {
    return (KnownFPClasses & Mask) == fcNone;
  }

  bool isUnknown() const {
    return KnownFPClasses == fcAllFlags && !SignBit;
  }

  /// Return true if it's known this can never be a nan.
  bool isKnownNeverNaN() const {
    return isKnownNever(fcNan);
  }

  /// Return true if it's known this can never be an infinity.
  bool isKnownNeverInfinity() const {
    return isKnownNever(fcInf);
  }

  /// Return true if it's known this can never be +infinity.
  bool isKnownNeverPosInfinity() const {
    return isKnownNever(fcPosInf);
  }

  /// Return true if it's known this can never be -infinity.
  bool isKnownNeverNegInfinity() const {
    return isKnownNever(fcNegInf);
  }

  /// Return true if it's known this can never be a subnormal.
  bool isKnownNeverSubnormal() const {
    return isKnownNever(fcSubnormal);
  }

  /// Return true if it's known this can never be a positive subnormal.
  bool isKnownNeverPosSubnormal() const {
    return isKnownNever(fcPosSubnormal);
  }

  /// Return true if it's known this can never be a negative subnormal.
  bool isKnownNeverNegSubnormal() const {
    return isKnownNever(fcNegSubnormal);
  }

  /// Return true if it's known this can never be a zero. This means a literal
  /// [+-]0, and does not include denormal inputs implicitly treated as [+-]0.
  bool isKnownNeverZero() const {
    return isKnownNever(fcZero);
  }

  /// Return true if it's known this can never be a literal positive zero.
  bool isKnownNeverPosZero() const {
    return isKnownNever(fcPosZero);
  }

  /// Return true if it's known this can never be a negative zero. This means a
  /// literal -0 and does not include denormal inputs implicitly treated as -0.
  bool isKnownNeverNegZero() const {
    return isKnownNever(fcNegZero);
  }

  /// Return true if it's known this can never be interpreted as a zero. This
  /// extends isKnownNeverZero to cover the case where the assumed
  /// floating-point mode for the function interprets denormals as zero.
  bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a negative
  /// zero.
  bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const;

  /// Return true if it's known this can never be interpreted as a positive
  /// zero.
  bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const;

  static constexpr FPClassTest OrderedLessThanZeroMask =
      fcNegSubnormal | fcNegNormal | fcNegInf;
  static constexpr FPClassTest OrderedGreaterThanZeroMask =
      fcPosSubnormal | fcPosNormal | fcPosInf;

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never less than -0.0.
  ///
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> true
  ///   x < -0 --> false
  bool cannotBeOrderedLessThanZero() const {
    return isKnownNever(OrderedLessThanZeroMask);
  }

  /// Return true if we can prove that the analyzed floating-point value is
  /// either NaN or never greater than -0.0.
  ///      NaN --> true
  ///       +0 --> true
  ///       -0 --> true
  ///   x > +0 --> false
  ///   x < -0 --> true
  bool cannotBeOrderedGreaterThanZero() const {
    return isKnownNever(OrderedGreaterThanZeroMask);
  }

  KnownFPClass &operator|=(const KnownFPClass &RHS) {
    KnownFPClasses = KnownFPClasses | RHS.KnownFPClasses;

    if (SignBit != RHS.SignBit)
      SignBit = std::nullopt;
    return *this;
  }

  void knownNot(FPClassTest RuleOut) {
    KnownFPClasses = KnownFPClasses & ~RuleOut;
    if (isKnownNever(fcNan) && !SignBit) {
      if (isKnownNever(fcNegative))
        SignBit = false;
      else if (isKnownNever(fcPositive))
        SignBit = true;
    }
  }

  void fneg() {
    KnownFPClasses = llvm::fneg(KnownFPClasses);
    if (SignBit)
      SignBit = !*SignBit;
  }

  void fabs() {
    if (KnownFPClasses & fcNegZero)
      KnownFPClasses |= fcPosZero;

    if (KnownFPClasses & fcNegInf)
      KnownFPClasses |= fcPosInf;

    if (KnownFPClasses & fcNegSubnormal)
      KnownFPClasses |= fcPosSubnormal;

    if (KnownFPClasses & fcNegNormal)
      KnownFPClasses |= fcPosNormal;

    signBitMustBeZero();
  }

  /// Return true if the sign bit must be 0, ignoring the sign of nans.
  bool signBitIsZeroOrNaN() const {
    return isKnownNever(fcNegative);
  }

  /// Assume the sign bit is zero.
  void signBitMustBeZero() {
    KnownFPClasses &= (fcPositive | fcNan);
    SignBit = false;
  }

  /// Assume the sign bit is one.
  void signBitMustBeOne() {
    KnownFPClasses &= (fcNegative | fcNan);
    SignBit = true;
  }

  void copysign(const KnownFPClass &Sign) {
    // Don't know anything about the sign of the source. Expand the possible
    // set to its opposite sign pair.
    if (KnownFPClasses & fcZero)
      KnownFPClasses |= fcZero;
    if (KnownFPClasses & fcSubnormal)
      KnownFPClasses |= fcSubnormal;
    if (KnownFPClasses & fcNormal)
      KnownFPClasses |= fcNormal;
    if (KnownFPClasses & fcInf)
      KnownFPClasses |= fcInf;

    // Sign bit is exactly preserved even for nans.
    SignBit = Sign.SignBit;

    // Clear sign bits based on the input sign mask.
    if (Sign.isKnownNever(fcPositive | fcNan) || (SignBit && *SignBit))
      KnownFPClasses &= (fcNegative | fcNan);
    if (Sign.isKnownNever(fcNegative | fcNan) || (SignBit && !*SignBit))
      KnownFPClasses &= (fcPositive | fcNan);
  }

  /// Propagate knowledge that a non-NaN source implies the result can also not
  /// be a NaN. For unconstrained operations, signaling nans are not guaranteed
  /// to be quieted but cannot be introduced.
  void propagateNaN(const KnownFPClass &Src, bool PreserveSign = false) {
    if (Src.isKnownNever(fcNan)) {
      knownNot(fcNan);
      if (PreserveSign)
        SignBit = Src.SignBit;
    } else if (Src.isKnownNever(fcSNan))
      knownNot(fcSNan);
  }

  /// Propagate knowledge from a source value that could be a denormal or
  /// zero. We have to be conservative since output flushing is not guaranteed,
  /// so known-never-zero may not hold.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty);

  /// Report known classes if \p Src is evaluated through a potentially
  /// canonicalizing operation. We can assume signaling nans will not be
  /// introduced, but cannot assume a denormal will be flushed under FTZ/DAZ.
  ///
  /// This assumes a copy-like operation and will replace any currently known
  /// information.
  void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F,
                                  Type *Ty);

  void resetAll() { *this = KnownFPClass(); }
};

inline KnownFPClass operator|(KnownFPClass LHS, const KnownFPClass &RHS) {
  LHS |= RHS;
  return LHS;
}

inline KnownFPClass operator|(const KnownFPClass &LHS, KnownFPClass &&RHS) {
  RHS |= LHS;
  return std::move(RHS);
}

/// Determine which floating-point classes are valid for \p V, and return them
/// in KnownFPClass bit sets.
///
/// This function is defined on values with floating-point type, vectors of
/// floating-point type, and arrays of floating-point type.
///
/// \p InterestedClasses is a compile time optimization hint for which floating
/// point classes should be queried. Queries not specified in \p
/// InterestedClasses should be reliable if they are determined during the
/// query.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                                 FPClassTest InterestedClasses, unsigned Depth,
                                 const SimplifyQuery &SQ);

KnownFPClass computeKnownFPClass(const Value *V, FPClassTest InterestedClasses,
                                 unsigned Depth, const SimplifyQuery &SQ);

inline KnownFPClass computeKnownFPClass(
    const Value *V, const DataLayout &DL,
    FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
    const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
    const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
    bool UseInstrInfo = true) {
  return computeKnownFPClass(
      V, InterestedClasses, Depth,
      SimplifyQuery(DL, TLI, DT, AC, CxtI, UseInstrInfo));
}

/// Wrapper to account for known fast math flags at the use instruction.
inline KnownFPClass computeKnownFPClass(const Value *V, FastMathFlags FMF,
                                        FPClassTest InterestedClasses,
                                        unsigned Depth,
                                        const SimplifyQuery &SQ) {
  if (FMF.noNaNs())
    InterestedClasses &= ~fcNan;
  if (FMF.noInfs())
    InterestedClasses &= ~fcInf;

  KnownFPClass Result = computeKnownFPClass(V, InterestedClasses, Depth, SQ);

  if (FMF.noNaNs())
    Result.KnownFPClasses &= ~fcNan;
  if (FMF.noInfs())
    Result.KnownFPClasses &= ~fcInf;
  return Result;
}
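
// Example (an illustrative sketch): ruling out NaN and infinity before a
// transform that assumes finite values; `V` and `SQ` are placeholders for a
// Value and a SimplifyQuery:
//
//   KnownFPClass Known =
//       computeKnownFPClass(V, fcNan | fcInf, /*Depth=*/0, SQ);
//   if (Known.isKnownNeverNaN() && Known.isKnownNeverInfinity())
//     ; // V is provably finite.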

/// Return true if we can prove that the specified FP value is never equal to
/// -0.0. Users should use caution when considering PreserveSign
/// denormal-fp-math.
inline bool cannotBeNegativeZero(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcNegZero, Depth, SQ);
  return Known.isKnownNeverNegZero();
}

/// Return true if we can prove that the specified FP value is either NaN or
/// never less than -0.0.
///
///      NaN --> true
///       +0 --> true
///       -0 --> true
///   x > +0 --> true
///   x < -0 --> false
inline bool cannotBeOrderedLessThanZero(const Value *V, unsigned Depth,
                                        const SimplifyQuery &SQ) {
  KnownFPClass Known =
      computeKnownFPClass(V, KnownFPClass::OrderedLessThanZeroMask, Depth, SQ);
  return Known.cannotBeOrderedLessThanZero();
}

/// Return true if the floating-point scalar value is not an infinity or if
/// the floating-point vector value has no infinities. Return false if a value
/// could ever be infinity.
inline bool isKnownNeverInfinity(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcInf, Depth, SQ);
  return Known.isKnownNeverInfinity();
}

/// Return true if the floating-point value can never contain a NaN or
/// infinity.
inline bool isKnownNeverInfOrNaN(const Value *V, unsigned Depth,
                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcInf | fcNan, Depth, SQ);
  return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}

/// Return true if the floating-point scalar value is not a NaN or if the
/// floating-point vector value has no NaN elements. Return false if a value
/// could ever be NaN.
inline bool isKnownNeverNaN(const Value *V, unsigned Depth,
                            const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcNan, Depth, SQ);
  return Known.isKnownNeverNaN();
}

/// Return false if we can prove that the specified FP value's sign bit is 0.
/// Return true if we can prove that the specified FP value's sign bit is 1.
/// Otherwise return std::nullopt.
inline std::optional<bool> computeKnownFPSignBit(const Value *V, unsigned Depth,
                                                 const SimplifyQuery &SQ) {
  KnownFPClass Known = computeKnownFPClass(V, fcAllFlags, Depth, SQ);
  return Known.SignBit;
}

/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8
/// values obviously, but is also true for i32 0, i32 -1, i16 0xF0F0, double
/// 0.0 etc. If the value can't be handled with a repeated byte store (e.g.
/// i16 0x1234), return null. If the value is entirely undef and padding,
/// return undef.
Value *isBytewiseValue(Value *V, const DataLayout &DL);

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it were inserted
/// directly into the aggregate.
///
/// If InsertBefore is not empty, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *FindInsertedValue(
    Value *V, ArrayRef<unsigned> idx_range,
    std::optional<BasicBlock::iterator> InsertBefore = std::nullopt);

/// Analyze the specified pointer to see if it can be expressed as a base
/// pointer plus a constant offset. Return the base and offset to the caller.
///
/// This is a wrapper around Value::stripAndAccumulateConstantOffsets that
/// creates and later unpacks the required APInt.
inline Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL,
                                               bool AllowNonInbounds = true) {
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  Value *Base =
      Ptr->stripAndAccumulateConstantOffsets(DL, OffsetAPInt, AllowNonInbounds);

  Offset = OffsetAPInt.getSExtValue();
  return Base;
}
inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
                                 const DataLayout &DL,
                                 bool AllowNonInbounds = true) {
  return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset, DL,
                                          AllowNonInbounds);
}

/// Returns true if the GEP is based on a pointer to a string (array of
/// \p CharSize integers) and is indexing into this string.
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize = 8);

/// Represents offset+length into a ConstantDataArray.
struct ConstantDataArraySlice {
  /// ConstantDataArray pointer. nullptr indicates a zeroinitializer (a valid
  /// initializer, it just doesn't fit the ConstantDataArray interface).
  const ConstantDataArray *Array;

  /// Slice starts at this Offset.
  uint64_t Offset;

  /// Length of the slice.
  uint64_t Length;

  /// Moves the Offset and adjusts Length accordingly.
  void move(uint64_t Delta) {
    assert(Delta < Length);
    Offset += Delta;
    Length -= Delta;
  }

  /// Convenience accessor for elements in the slice.
  uint64_t operator[](unsigned I) const {
    return Array == nullptr ? 0 : Array->getElementAsInteger(I + Offset);
  }
};

/// Returns true if the value \p V is a pointer into a ConstantDataArray.
/// If successful \p Slice will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
                              unsigned ElementSize, uint64_t Offset = 0);

/// This function computes the length of a null-terminated C string pointed to
/// by V. If successful, it returns true and returns the string in Str. If
/// unsuccessful, it returns false. This does not include the trailing null
/// character by default. If TrimAtNul is set to false, then this returns any
/// trailing null characters as well as any other characters that come after
/// it.
bool getConstantStringInfo(const Value *V, StringRef &Str,
                           bool TrimAtNul = true);
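
// Example (illustrative): if `V` points at a global such as
//
//   @s = private constant [6 x i8] c"hello\00"
//
// then getConstantStringInfo(V, Str) returns true and sets Str to "hello".
// With TrimAtNul set to false, Str would also include the terminating NUL
// and any bytes after it.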

/// If we can compute the length of the string pointed to by the specified
/// pointer, return 'len+1'. If we can't, return 0.
uint64_t GetStringLength(const Value *V, unsigned CharSize = 8);

/// This function returns the call's pointer argument that is considered the
/// same as its return value by aliasing rules. You CAN'T use it to replace
/// one value with another. If \p MustPreserveNullness is true, the call must
/// preserve the nullness of the pointer.
const Value *getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                                  bool MustPreserveNullness);
inline Value *getArgumentAliasingToReturnedPointer(CallBase *Call,
                                                   bool MustPreserveNullness) {
  return const_cast<Value *>(getArgumentAliasingToReturnedPointer(
      const_cast<const CallBase *>(Call), MustPreserveNullness));
}

/// {launder,strip}.invariant.group returns a pointer that aliases its
/// argument, and it only captures the pointer by returning it.
/// These intrinsics are not marked as nocapture, because returning is
/// considered as capture. The arguments are not marked as returned either,
/// because that would make them useless. If \p MustPreserveNullness is true,
/// the intrinsic must preserve the nullness of the pointer.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness);

/// This method strips off any GEP address adjustments, pointer casts
/// or `llvm.threadlocal.address` from the specified value \p V, returning the
/// original object being addressed. Note that the returned value has pointer
/// type if the specified value does. If the \p MaxLookup value is non-zero, it
/// limits the number of instructions to be stripped off.
const Value *getUnderlyingObject(const Value *V, unsigned MaxLookup = 6);
inline Value *getUnderlyingObject(Value *V, unsigned MaxLookup = 6) {
  // Force const to avoid infinite recursion.
  const Value *VConst = V;
  return const_cast<Value *>(getUnderlyingObject(VConst, MaxLookup));
}
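
// Example (illustrative): getUnderlyingObject walks back through address
// computations to the allocation. For
//
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], ptr %a, i64 0, i64 4
//
// getUnderlyingObject(%p) returns %a.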

/// This method is similar to getUnderlyingObject except that it can
/// look through phi and select instructions and return multiple objects.
///
/// If LoopInfo is passed, loop phis are further analyzed. If a pointer
/// accesses different objects in each iteration, we don't look through the
/// phi node. E.g. consider this loop nest:
///
///   int **A;
///   for (i)
///     for (j) {
///        A[i][j] = A[i-1][j] * B[j]
///     }
///
/// This is transformed by Load-PRE to stash away A[i] for the next iteration
/// of the outer loop:
///
///   Curr = A[0];          // Prev_0
///   for (i: 1..N) {
///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
///     Curr = A[i];
///     for (j: 0..N) {
///        Curr[j] = Prev[j] * B[j]
///     }
///   }
///
/// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
/// should not assume that Curr and Prev share the same underlying object, and
/// thus it shouldn't look through the phi above.
void getUnderlyingObjects(const Value *V,
                          SmallVectorImpl<const Value *> &Objects,
                          LoopInfo *LI = nullptr, unsigned MaxLookup = 6);

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
bool getUnderlyingObjectsForCodeGen(const Value *V,
                                    SmallVectorImpl<Value *> &Objects);

/// Returns the unique alloca that the value comes from, or nullptr.
/// If OffsetZero is true, check that V points to the beginning of the alloca.
AllocaInst *findAllocaForValue(Value *V, bool OffsetZero = false);
inline const AllocaInst *findAllocaForValue(const Value *V,
                                            bool OffsetZero = false) {
  return findAllocaForValue(const_cast<Value *>(V), OffsetZero);
}

/// Return true if the only users of this pointer are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

/// Return true if the only users of this pointer are lifetime markers or
/// droppable instructions.
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V);

/// Return true if speculation of the given load must be suppressed to avoid
/// ordering or interfering with an active sanitizer. If not suppressed,
/// dereferenceability and alignment must be proven separately. Note: This
/// is only needed for raw reasoning; if you use the interface below
/// (isSafeToSpeculativelyExecute), this is handled internally.
bool mustSuppressSpeculation(const LoadInst &LI);

/// Return true if the instruction does not have any effects besides
/// calculating the result and does not have undefined behavior.
///
/// This method never returns true for an instruction that returns true for
/// mayHaveSideEffects; however, this method also does some other checks in
/// addition. It checks for undefined behavior, like dividing by zero or
/// loading from an invalid pointer (but not for undefined results, like a
/// shift with a shift amount larger than the width of the result). It checks
/// for malloc and alloca because speculatively executing them might cause a
/// memory leak. It also returns false for instructions related to control
/// flow, specifically terminators and PHI nodes.
///
/// If the CtxI is specified this method performs context-sensitive analysis
/// and returns true if it is safe to execute the instruction immediately
/// before the CtxI.
///
/// If the CtxI is NOT specified this method only looks at the instruction
/// itself and its operands, so if this method returns true, it is safe to
/// move the instruction as long as the correct dominance relationships for
/// the operands and users hold.
///
/// This method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Instruction *I,
                                  const Instruction *CtxI = nullptr,
                                  AssumptionCache *AC = nullptr,
                                  const DominatorTree *DT = nullptr,
                                  const TargetLibraryInfo *TLI = nullptr);

inline bool
isSafeToSpeculativelyExecute(const Instruction *I, BasicBlock::iterator CtxI,
                             AssumptionCache *AC = nullptr,
                             const DominatorTree *DT = nullptr,
                             const TargetLibraryInfo *TLI = nullptr) {
  // Take an iterator, and unwrap it into an Instruction *.
  return isSafeToSpeculativelyExecute(I, &*CtxI, AC, DT, TLI);
}

/// This returns the same result as isSafeToSpeculativelyExecute if Opcode is
/// the actual opcode of Inst. If the provided and actual opcode differ, the
/// function (virtually) overrides the opcode of Inst with the provided
/// Opcode. There are some constraints in this case:
/// * If Opcode has a fixed number of operands (eg, as binary operators do),
///   then Inst has to have at least as many leading operands. The function
///   will ignore all trailing operands beyond that number.
/// * If Opcode allows for an arbitrary number of operands (eg, as CallInsts
///   do), then all operands are considered.
/// * The virtual instruction has to satisfy all typing rules of the provided
///   Opcode.
/// * This function is pessimistic in the following sense: If one actually
///   materialized the virtual instruction, then isSafeToSpeculativelyExecute
///   may say that the materialized instruction is speculatable whereas this
///   function may have said that the instruction wouldn't be speculatable.
///   This behavior is a shortcoming in the current implementation and not
///   intentional.
bool isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI = nullptr,
    AssumptionCache *AC = nullptr, const DominatorTree *DT = nullptr,
    const TargetLibraryInfo *TLI = nullptr);

/// Returns true if the result or effects of the given instruction \p I
/// depend on values not reachable through the def-use graph.
/// * Memory dependence arises for example if the instruction reads from
///   memory or may produce effects or undefined behaviour. Memory dependent
///   instructions generally cannot be reordered with respect to other memory
///   dependent instructions.
/// * Control dependence arises for example if the instruction may fault
///   if lifted above a throwing call or infinite loop.
bool mayHaveNonDefUseDependency(const Instruction &I);

/// Return true if it is an intrinsic that cannot be speculated but also
/// cannot trap.
bool isAssumeLikeIntrinsic(const Instruction *I);

/// Return true if it is valid to use the assumptions provided by an
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI. By default, ephemeral values of the assumption
/// are treated as an invalid context, to prevent the assumption from being used
/// to optimize away its argument. If the caller can ensure that this won't
/// happen, it can call with AllowEphemerals set to true to get more valid
/// assumptions.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
                             const DominatorTree *DT = nullptr,
                             bool AllowEphemerals = false);

enum class OverflowResult {
  /// Always overflows in the direction of signed/unsigned min value.
  AlwaysOverflowsLow,
  /// Always overflows in the direction of signed/unsigned max value.
  AlwaysOverflowsHigh,
  /// May or may not overflow.
  MayOverflow,
  /// Never overflows.
  NeverOverflows,
};
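
// Example (an illustrative sketch): deciding whether an add can be marked
// nuw; `A`, `B`, and `SQ` are placeholders for the operands and query:
//
//   if (computeOverflowForUnsignedAdd(A, B, SQ) ==
//       OverflowResult::NeverOverflows)
//     ; // The unsigned add of A and B can be marked nuw.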

OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);
OverflowResult
computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                              const WithCache<const Value *> &RHS,
                              const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                           const WithCache<const Value *> &RHS,
                                           const SimplifyQuery &SQ);
/// This version also leverages the sign bit of Add if known.
OverflowResult computeOverflowForSignedAdd(const AddOperator *Add,
                                           const SimplifyQuery &SQ);
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
                                             const SimplifyQuery &SQ);
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
                                           const SimplifyQuery &SQ);

/// Returns true if the arithmetic part of the \p WO's result is
/// used only along the paths control dependent on the computation
/// not overflowing, \p WO being an <op>.with.overflow intrinsic.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                               const DominatorTree &DT);

/// Determine the possible constant range of vscale with the given bit width,
/// based on the vscale_range function attribute.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth);

/// Determine the possible constant range of an integer or vector of integer
/// value. This is intended as a cheap, non-recursive check.
ConstantRange computeConstantRange(const Value *V, bool ForSigned,
                                   bool UseInstrInfo = true,
                                   AssumptionCache *AC = nullptr,
                                   const Instruction *CtxI = nullptr,
                                   const DominatorTree *DT = nullptr,
                                   unsigned Depth = 0);

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
ConstantRange
computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                       bool ForSigned, const SimplifyQuery &SQ);

/// Return true if this function can prove that the instruction I will
/// always transfer execution to one of its successors (including the next
/// instruction that follows within a basic block). E.g. this is not
/// guaranteed for function calls that could loop infinitely.
///
/// In other words, this function returns false for instructions that may
/// transfer execution or fail to transfer execution in a way that is not
/// captured in the CFG nor in the sequence of instructions within a basic
/// block.
///
/// Undefined behavior is assumed not to happen, so e.g. division is
/// guaranteed to transfer execution to the following instruction even
/// though division by zero might cause undefined behavior.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);

/// Returns true if this block does not contain a potential implicit exit.
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
/// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);

/// Return true if every instruction in the range (Begin, End) is
/// guaranteed to transfer execution to its static successor. \p ScanLimit
/// bounds the search to avoid scanning huge blocks.
bool isGuaranteedToTransferExecutionToSuccessor(
    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
    unsigned ScanLimit = 32);

/// Same as previous, but with range expressed via iterator_range.
bool isGuaranteedToTransferExecutionToSuccessor(
    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit = 32);

/// Return true if this function can prove that the instruction I
/// is executed for every iteration of the loop L.
///
/// Note that this currently only considers the loop header.
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                            const Loop *L);

/// Return true if \p PoisonOp's user yields poison or raises UB if its
/// operand \p PoisonOp is poison.
///
/// If \p PoisonOp is a vector or an aggregate and the operation's result is a
/// single value, any poison element in \p PoisonOp should make the result
/// poison or raise UB.
///
/// To filter out operands that raise UB on poison, you can use
/// getGuaranteedNonPoisonOp.
bool propagatesPoison(const Use &PoisonOp);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand has a poison value.
void getGuaranteedNonPoisonOps(const Instruction *I,
                               SmallVectorImpl<const Value *> &Ops);

/// Insert operands of I into Ops such that I will trigger undefined behavior
/// if I is executed and that operand is not a well-defined value
/// (i.e. has undef bits or poison).
void getGuaranteedWellDefinedOps(const Instruction *I,
                                 SmallVectorImpl<const Value *> &Ops);

/// Return true if the given instruction must trigger undefined behavior
/// when I is executed with any operands which appear in KnownPoison holding
/// a poison value at the point of execution.
bool mustTriggerUB(const Instruction *I,
                   const SmallPtrSetImpl<const Value *> &KnownPoison);

/// Return true if this function can prove that if Inst is executed
/// and yields a poison value or undef bits, then that will trigger
/// undefined behavior.
///
/// Note that this currently only considers the basic block that is
/// the parent of Inst.
bool programUndefinedIfUndefOrPoison(const Instruction *Inst);
bool programUndefinedIfPoison(const Instruction *Inst);

/// canCreateUndefOrPoison returns true if Op can create undef or poison from
/// non-undef & non-poison operands.
/// For vectors, canCreateUndefOrPoison returns true if there is potential
/// poison or undef in any element of the result when vectors without
/// undef/poison are given as operands.
/// For example, given `Op = shl <2 x i32> %x, <0, 32>`, this function returns
/// true. If Op raises immediate UB but never creates poison or undef
/// (e.g. sdiv I, 0), canCreatePoison returns false.
///
/// \p ConsiderFlagsAndMetadata controls whether poison producing flags and
/// metadata on the instruction are considered. This can be used to see if the
/// instruction could still introduce undef or poison even without poison
/// generating flags and metadata which might be on the instruction.
/// (i.e. could the result of Op->dropPoisonGeneratingFlags() still create
/// poison or undef)
///
/// canCreatePoison returns true if Op can create poison from non-poison
/// operands.
bool canCreateUndefOrPoison(const Operator *Op,
                            bool ConsiderFlagsAndMetadata = true);
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata = true);
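
// Example (illustrative): `%r = add nsw i32 %x, %y` can create poison (on
// signed overflow), so canCreatePoison returns true for it when
// ConsiderFlagsAndMetadata is true. With ConsiderFlagsAndMetadata set to
// false, the nsw flag is ignored, and a plain `add` never creates poison, so
// it returns false.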

/// Return true if V is poison given that ValAssumedPoison is already poison.
/// For example, if ValAssumedPoison is `icmp X, 10` and V is `icmp X, 5`,
/// impliesPoison returns true.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V);

/// Return true if this function can prove that V does not have undef bits
/// and is never poison. If V is an aggregate value or vector, check whether
/// all elements (except padding) are not undef or poison.
/// Note that this is different from canCreateUndefOrPoison because the
/// function assumes Op's operands are not poison/undef.
///
/// If CtxI and DT are specified this method performs flow-sensitive analysis
/// and returns true if it is guaranteed to be never undef or poison
/// immediately before the CtxI.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
                                      AssumptionCache *AC = nullptr,
                                      const Instruction *CtxI = nullptr,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0);

/// Returns true if V cannot be poison, but may be undef.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC = nullptr,
                               const Instruction *CtxI = nullptr,
                               const DominatorTree *DT = nullptr,
                               unsigned Depth = 0);

inline bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                      BasicBlock::iterator CtxI,
                                      const DominatorTree *DT = nullptr,
                                      unsigned Depth = 0) {
  // Takes an iterator as a position, passes down to Instruction *
  // implementation.
  return isGuaranteedNotToBePoison(V, AC, &*CtxI, DT, Depth);
}

/// Returns true if V cannot be undef, but may be poison.
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC = nullptr,
                              const Instruction *CtxI = nullptr,
                              const DominatorTree *DT = nullptr,
                              unsigned Depth = 0);

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result. Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison. This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist. Note that a false result conveys no information.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                   Instruction *OnPathTo,
                                   DominatorTree *DT);

/// Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
  SPF_UNKNOWN = 0,
  SPF_SMIN,    /// Signed minimum
  SPF_UMIN,    /// Unsigned minimum
  SPF_SMAX,    /// Signed maximum
  SPF_UMAX,    /// Unsigned maximum
  SPF_FMINNUM, /// Floating point minnum
  SPF_FMAXNUM, /// Floating point maxnum
  SPF_ABS,     /// Absolute value
  SPF_NABS     /// Negated absolute value
};

/// Behavior when a floating point min/max is given one NaN and one
/// non-NaN as input.
enum SelectPatternNaNBehavior {
  SPNB_NA = 0,        /// NaN behavior not applicable.
  SPNB_RETURNS_NAN,   /// Given one NaN input, returns the NaN.
  SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
  SPNB_RETURNS_ANY    /// Given one NaN input, can return either (or
                      /// it has been determined that no operands can
                      /// be NaN).
};

struct SelectPatternResult {
  SelectPatternFlavor Flavor;
  SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
                                        /// SPF_FMINNUM or SPF_FMAXNUM.
  bool Ordered; /// When implementing this min/max pattern as
                /// fcmp; select, does the fcmp have to be
                /// ordered?

  /// Return true if \p SPF is a min or a max pattern.
  static bool isMinOrMax(SelectPatternFlavor SPF) {
    return SPF != SPF_UNKNOWN && SPF != SPF_ABS && SPF != SPF_NABS;
  }
};

/// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
/// and providing the out parameter results if we successfully match.
///
/// For ABS/NABS, LHS will be set to the input to the abs idiom. RHS will be
/// the negation instruction from the idiom.
///
/// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
/// not match that of the original select. If this is the case, the cast
/// operation (one of Trunc,SExt,Zext) that must be done to transform the
/// type of LHS and RHS into the type of V is returned in CastOp.
///
/// For example:
///   %1 = icmp slt i32 %a, 4
///   %2 = sext i32 %a to i64
///   %3 = select i1 %1, i64 %2, i64 4
///
/// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
///
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                       Instruction::CastOps *CastOp = nullptr,
                                       unsigned Depth = 0);

inline SelectPatternResult matchSelectPattern(const Value *V, const Value *&LHS,
                                              const Value *&RHS) {
  Value *L = const_cast<Value *>(LHS);
  Value *R = const_cast<Value *>(RHS);
  auto Result = matchSelectPattern(const_cast<Value *>(V), L, R);
  LHS = L;
  RHS = R;
  return Result;
}

/// Determine the pattern that a select with the given compare as its
/// predicate and given values as its true/false operands would match.
SelectPatternResult matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp = nullptr, unsigned Depth = 0);

/// Return the canonical comparison predicate for the specified
/// minimum/maximum flavor.
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered = false);

/// Return the inverse minimum/maximum flavor of the specified flavor.
/// For example, signed minimum is the inverse of signed maximum.
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF);

Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID);

/// Return the minimum or maximum constant value for the specified integer
/// min/max flavor and type.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth);

/// Check if the values in \p VL are select instructions that can be converted
/// to a min or max (vector) intrinsic. Returns the intrinsic ID, if such a
/// conversion is possible, together with a bool indicating whether all select
/// conditions are only used by the selects. Otherwise return
/// Intrinsic::not_intrinsic.
std::pair<Intrinsic::ID, bool>
canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL);

/// Attempt to match a simple first order recurrence cycle of the form:
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %iv, %step
/// OR
///   %iv = phi Ty [%Start, %Entry], [%Inc, %backedge]
///   %inc = binop %step, %iv
///
/// A first order recurrence is a formula with the form: X_n = f(X_(n-1))
///
/// A couple of notes on subtleties in that definition:
/// * The Step does not have to be loop invariant. In math terms, it can
///   be a free variable. We allow recurrences with both constant and
///   variable coefficients. Callers may wish to filter cases where Step
///   does not dominate P.
/// * For non-commutative operators, we will match both forms. This
///   results in some odd recurrence structures. Callers may wish to filter
///   out recurrences where the phi is not the LHS of the returned operator.
/// * Because of the structure matched, the caller can assume as a post
///   condition of the match the presence of a Loop with P's parent as its
///   header *except* in unreachable code. (Dominance decays in unreachable
///   code.)
///
/// NOTE: This is intentionally simple. If you want the ability to analyze
/// non-trivial loop conditions, see ScalarEvolution instead.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start,
                           Value *&Step);

/// Analogous to the above, but starting from the binary operator.
bool matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P, Value *&Start,
                           Value *&Step);
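
// Example (illustrative): for the loop
//
//   loop:
//     %iv  = phi i32 [ 0, %entry ], [ %inc, %loop ]
//     %inc = add i32 %iv, %step
//
// matchSimpleRecurrence(%iv, BO, Start, Step) returns true with BO = %inc,
// Start = i32 0, and Step = %step.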

/// Return true if RHS is known to be implied true by LHS. Return false if
/// RHS is known to be implied false by LHS. Otherwise, return std::nullopt if
/// no implication can be made. A & B must be i1 (boolean) values or a vector of
/// such values. Note that the truth table for implication is the same as <=u on
/// i1 values (but not <=s!). The truth table for both is:
///      | T | F (B)
///    T | T | F
///    F | T | T
///   (A)
std::optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
std::optional<bool> isImpliedCondition(const Value *LHS,
                                       CmpInst::Predicate RHSPred,
                                       const Value *RHSOp0, const Value *RHSOp1,
                                       const DataLayout &DL,
                                       bool LHSIsTrue = true,
                                       unsigned Depth = 0);
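
// Example (illustrative): if LHS is `%c1 = icmp ult i32 %x, 5` and RHS is
// `%c2 = icmp ult i32 %x, 10`, then %c1 being true implies %c2 is true, so
// isImpliedCondition(%c1, %c2, DL) can return true.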

/// Return the boolean condition value in the context of the given instruction
/// if it is known based on dominating conditions.
std::optional<bool> isImpliedByDomCondition(const Value *Cond,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);
std::optional<bool> isImpliedByDomCondition(CmpInst::Predicate Pred,
                                            const Value *LHS, const Value *RHS,
                                            const Instruction *ContextI,
                                            const DataLayout &DL);

/// Call \p InsertAffected on all Values whose known bits / value may be
/// affected by the condition \p Cond. Used by AssumptionCache and
/// DomConditionCache.
void findValuesAffectedByCondition(Value *Cond, bool IsAssume,
                                   function_ref<void(Value *)> InsertAffected);

} // end namespace llvm

#endif // LLVM_ANALYSIS_VALUETRACKING_H