1//===- ScopInfo.cpp -------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Create a polyhedral description for a static control flow region.
10//
11// The pass creates a polyhedral description of the Scops detected by the Scop
12// detection derived from their LLVM-IR code.
13//
14// This representation is shared among several tools in the polyhedral
15// community, which are e.g. Cloog, Pluto, Loopo, Graphite.
16//
17//===----------------------------------------------------------------------===//
18
19#include "polly/ScopInfo.h"
20#include "polly/LinkAllPasses.h"
21#include "polly/Options.h"
22#include "polly/ScopBuilder.h"
23#include "polly/ScopDetection.h"
24#include "polly/Support/GICHelper.h"
25#include "polly/Support/ISLOStream.h"
26#include "polly/Support/ISLTools.h"
27#include "polly/Support/SCEVAffinator.h"
28#include "polly/Support/SCEVValidator.h"
29#include "polly/Support/ScopHelper.h"
30#include "llvm/ADT/APInt.h"
31#include "llvm/ADT/ArrayRef.h"
32#include "llvm/ADT/PostOrderIterator.h"
33#include "llvm/ADT/Sequence.h"
34#include "llvm/ADT/SmallPtrSet.h"
35#include "llvm/ADT/SmallSet.h"
36#include "llvm/ADT/Statistic.h"
37#include "llvm/ADT/StringExtras.h"
38#include "llvm/Analysis/AliasAnalysis.h"
39#include "llvm/Analysis/AssumptionCache.h"
40#include "llvm/Analysis/Loads.h"
41#include "llvm/Analysis/LoopInfo.h"
42#include "llvm/Analysis/OptimizationRemarkEmitter.h"
43#include "llvm/Analysis/RegionInfo.h"
44#include "llvm/Analysis/RegionIterator.h"
45#include "llvm/Analysis/ScalarEvolution.h"
46#include "llvm/Analysis/ScalarEvolutionExpressions.h"
47#include "llvm/IR/BasicBlock.h"
48#include "llvm/IR/ConstantRange.h"
49#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/DebugLoc.h"
51#include "llvm/IR/Dominators.h"
52#include "llvm/IR/Function.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
55#include "llvm/IR/Instructions.h"
56#include "llvm/IR/Module.h"
57#include "llvm/IR/PassManager.h"
58#include "llvm/IR/Type.h"
59#include "llvm/IR/Value.h"
60#include "llvm/InitializePasses.h"
61#include "llvm/Support/Compiler.h"
62#include "llvm/Support/Debug.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/raw_ostream.h"
65#include "isl/aff.h"
66#include "isl/local_space.h"
67#include "isl/map.h"
68#include "isl/options.h"
69#include "isl/set.h"
70#include <cassert>
71#include <numeric>
72
73using namespace llvm;
74using namespace polly;
75
76#include "polly/Support/PollyDebug.h"
77#define DEBUG_TYPE "polly-scops"
78
// Counters for each kind of assumption recorded while building the
// polyhedral description of a SCoP.
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

// Counters describing the SCoPs and loops that remain after ScopInfo ran.
STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

// Histogram of the maximal loop depth found per SCoP.
STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

// Counters for the scalar (value/PHI/singleton) writes that remain modeled.
STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");
117
// Definition of the extern constant declared in polly/ScopInfo.h.
unsigned const polly::MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop to add more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// Be a bit more generous for the defined behavior context which is used less
// often.
static int const MaxDisjunktsInDefinedBehaviourContext = 8;

// Command-line knobs controlling how precisely (and how expensively) the
// polyhedral model is built.
static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::cat(PollyCategory));

static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(Val: true), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do no gist simplify sets accordingly"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(Val: false), cl::cat(PollyCategory));

// Backing storage for the -polly-use-llvm-names flag below.
bool polly::UseInstructionNames;

static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(L&: UseInstructionNames), cl::Hidden, cl::cat(PollyCategory));

static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(Val: false), cl::cat(PollyCategory));

// Raw arguments forwarded to the isl library (may be given multiple times).
static cl::list<std::string> IslArgs("polly-isl-arg",
                                     cl::value_desc("argument"),
                                     cl::desc("Option passed to ISL"),
                                     cl::cat(PollyCategory));
171
172//===----------------------------------------------------------------------===//
173
174static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
175 int dim, isl::dim type) {
176 isl::val V;
177 isl::ctx Ctx = S.ctx();
178
179 // The upper and lower bound for a parameter value is derived either from
180 // the data type of the parameter or from the - possibly more restrictive -
181 // range metadata.
182 V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getSignedMin(), IsSigned: true);
183 S = S.lower_bound_val(type, pos: dim, value: V);
184 V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getSignedMax(), IsSigned: true);
185 S = S.upper_bound_val(type, pos: dim, value: V);
186
187 if (Range.isFullSet())
188 return S;
189
190 if (S.n_basic_set().release() > MaxDisjunctsInContext)
191 return S;
192
193 // In case of signed wrapping, we can refine the set of valid values by
194 // excluding the part not covered by the wrapping range.
195 if (Range.isSignWrappedSet()) {
196 V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getLower(), IsSigned: true);
197 isl::set SLB = S.lower_bound_val(type, pos: dim, value: V);
198
199 V = valFromAPInt(Ctx: Ctx.get(), Int: Range.getUpper(), IsSigned: true);
200 V = V.sub(v2: 1);
201 isl::set SUB = S.upper_bound_val(type, pos: dim, value: V);
202 S = SLB.unite(set2: SUB);
203 }
204
205 return S;
206}
207
208static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
209 LoadInst *BasePtrLI = dyn_cast<LoadInst>(Val: BasePtr);
210 if (!BasePtrLI)
211 return nullptr;
212
213 if (!S->contains(I: BasePtrLI))
214 return nullptr;
215
216 ScalarEvolution &SE = *S->getSE();
217
218 auto *OriginBaseSCEV =
219 SE.getPointerBase(V: SE.getSCEV(V: BasePtrLI->getPointerOperand()));
220 if (!OriginBaseSCEV)
221 return nullptr;
222
223 auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(Val: OriginBaseSCEV);
224 if (!OriginBaseSCEVUnknown)
225 return nullptr;
226
227 return S->getScopArrayInfo(BasePtr: OriginBaseSCEVUnknown->getValue(),
228 Kind: MemoryKind::Array);
229}
230
// Construct a ScopArrayInfo for the array behind 'BasePtr'.
//
// The isl id stores a printable name (the supplied BaseName, or a generated
// "MemRef..." name) and points back at this object through its user pointer.
// For MemoryKind::Array arrays whose base pointer is itself loaded inside the
// SCoP, the origin array is recorded and this array is registered as derived
// from it.
ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName(Prefix: "MemRef", Val: BasePtr, Number: S->getNextArrayIdx(),
                                      Suffix: Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  Id = isl::id::alloc(ctx: Ctx, name: BasePtrName, user: this);

  updateSizes(Sizes);

  // Only real arrays with a known base pointer can have an origin array.
  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(DerivedSAI: this);
}
254
// Defaulted out-of-line destructor.
ScopArrayInfo::~ScopArrayInfo() = default;
256
257isl::space ScopArrayInfo::getSpace() const {
258 auto Space = isl::space(Id.ctx(), 0, getNumberOfDimensions());
259 Space = Space.set_tuple_id(type: isl::dim::set, id: Id);
260 return Space;
261}
262
263bool ScopArrayInfo::isReadOnly() {
264 isl::union_set WriteSet = S.getWrites().range();
265 isl::space Space = getSpace();
266 WriteSet = WriteSet.extract_set(space: Space);
267
268 return bool(WriteSet.is_empty());
269}
270
271bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
272 if (Array->getElementType() != getElementType())
273 return false;
274
275 if (Array->getNumberOfDimensions() != getNumberOfDimensions())
276 return false;
277
278 for (unsigned i = 0; i < getNumberOfDimensions(); i++)
279 if (Array->getDimensionSize(Dim: i) != getDimensionSize(Dim: i))
280 return false;
281
282 return true;
283}
284
// Merge 'NewElementType' into the canonical element type of this array.
//
// If both types have the same allocation size (or the new one has size zero)
// nothing changes. Otherwise the canonical element type becomes an integer
// type whose width is the GCD of both sizes, so that every access offset is a
// multiple of the canonical element size.
void ScopArrayInfo::updateElementType(Type *NewElementType) {
  if (NewElementType == ElementType)
    return;

  auto OldElementSize = DL.getTypeAllocSizeInBits(Ty: ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(Ty: NewElementType);

  if (NewElementSize == OldElementSize || NewElementSize == 0)
    return;

  // NOTE(review): this condition can never hold — a nonzero multiple of
  // OldElementSize cannot be smaller than OldElementSize (the equal and zero
  // cases returned above), so control always takes the GCD fallback below.
  // Possibly "OldElementSize % NewElementSize == 0" was intended; confirm
  // against upstream history before changing behavior.
  if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
    ElementType = NewElementType;
  } else {
    auto GCD = std::gcd(m: (uint64_t)NewElementSize, n: (uint64_t)OldElementSize);
    ElementType = IntegerType::get(C&: ElementType->getContext(), NumBits: GCD);
  }
}
302
// Update the dimension sizes of this array to 'NewSizes'.
//
// Sizes are matched innermost-first: when the dimension counts differ, only
// the common inner dimensions are compared. With 'CheckConsistency' set the
// update is rejected (returns false) if any shared dimension size conflicts,
// and skipped (returns true) when the known sizes already cover at least as
// many dimensions as the new ones.
bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                bool CheckConsistency) {
  int SharedDims = std::min(a: NewSizes.size(), b: DimensionSizes.size());
  int ExtraDimsNew = NewSizes.size() - SharedDims;
  int ExtraDimsOld = DimensionSizes.size() - SharedDims;

  if (CheckConsistency) {
    // Compare the shared (inner) dimensions; a known size may only be
    // confirmed, never contradicted.
    for (int i = 0; i < SharedDims; i++) {
      auto *NewSize = NewSizes[i + ExtraDimsNew];
      auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
      if (NewSize && KnownSize && NewSize != KnownSize)
        return false;
    }

    // Keep the existing, at-least-as-detailed dimensionality.
    if (DimensionSizes.size() >= NewSizes.size())
      return true;
  }

  // Adopt the new sizes and recompute the cached piecewise-affine forms.
  DimensionSizes.clear();
  DimensionSizes.insert(I: DimensionSizes.begin(), From: NewSizes.begin(),
                        To: NewSizes.end());
  DimensionSizesPw.clear();
  for (const SCEV *Expr : DimensionSizes) {
    if (!Expr) {
      // Unknown size; keep a null placeholder to preserve indexing.
      DimensionSizesPw.push_back(Elt: isl::pw_aff());
      continue;
    }
    isl::pw_aff Size = S.getPwAffOnly(E: Expr);
    DimensionSizesPw.push_back(Elt: Size);
  }
  return true;
}
335
// Printable name of this array (stored in the isl id).
std::string ScopArrayInfo::getName() const { return Id.get_name(); }

// Allocation size in bytes of the canonical element type.
int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(Ty: ElementType);
}

// The isl id identifying this array; its user pointer is this object.
isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(OS&: errs()); }
#endif
347
348void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
349 OS.indent(NumSpaces: 8) << *getElementType() << " " << getName();
350 unsigned u = 0;
351
352 if (getNumberOfDimensions() > 0 && !getDimensionSize(Dim: 0)) {
353 OS << "[*]";
354 u++;
355 }
356 for (; u < getNumberOfDimensions(); u++) {
357 OS << "[";
358
359 if (SizeAsPwAff) {
360 isl::pw_aff Size = getDimensionSizePw(Dim: u);
361 OS << " " << Size << " ";
362 } else {
363 OS << *getDimensionSize(Dim: u);
364 }
365
366 OS << "]";
367 }
368
369 OS << ";";
370
371 if (BasePtrOriginSAI)
372 OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";
373
374 OS << " // Element size " << getElemSizeInBytes() << "\n";
375}
376
377const ScopArrayInfo *
378ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
379 isl::id Id = PMA.get_tuple_id(type: isl::dim::out);
380 assert(!Id.is_null() && "Output dimension didn't have an ID");
381 return getFromId(Id);
382}
383
384const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
385 void *User = Id.get_user();
386 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
387 return SAI;
388}
389
// Fold each constant-sized inner dimension of the access relation back into
// bounds: replace subscript i by i % size and carry floor(i / size) into the
// next-outer subscript. E.g. with an inner dimension of size 4, the access
// [i, 5] is rewritten to [i + 1, 1].
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  // Identity mapping on the array space; per-dimension mod/div expressions
  // are substituted into it below.
  isl::multi_aff DivModAff = isl::multi_aff::identity(
      space: ArraySpace.map_from_domain_and_range(range: ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  // The outermost dimension (index 0) is never wrapped as it has no upper
  // bound to respect.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(Dim: i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(Val: DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx: Ctx.get(), Int: DimSizeCst->getAPInt(), IsSigned: false);
    isl::aff Var = isl::aff::var_on_domain(ls: LArraySpace, type: isl::dim::set, pos: i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(ls: LArraySpace, type: isl::dim::set, pos: i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(mod: DimSizeVal);
    Modulo = Modulo.pullback(ma: DivModAff);

    // Compute: floor(index / size), added as carry onto the next-outer
    // dimension.
    isl::aff Divide = Var.div(aff2: isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(aff2: PrevVar);
    Divide = Divide.pullback(ma: DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(pos: i, el: Modulo);
    DivModAff = DivModAff.set_aff(pos: i - 1, el: Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(map2: isl::map::from_multi_aff(maff: DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
441
// Bring the access relation up to the dimensionality of the accessed array.
//
// The relation built from the subscripts may have fewer dimensions than the
// ScopArrayInfo it targets; missing outer dimensions are padded with zero.
// Single-subscript (non-delinearized) accesses are divided by the element
// size, and accesses whose loaded/stored type is larger than the array's
// canonical element type are widened to multi-element accesses.
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.ctx();

  unsigned DimsArray = unsignedFromIslSize(Size: ArraySpace.dim(type: isl::dim::set));
  unsigned DimsAccess = unsignedFromIslSize(Size: AccessSpace.dim(type: isl::dim::set));
  assert(DimsArray >= DimsAccess);
  unsigned DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(Ty: getElementType());

  // Map the access range into the array space: fix the missing outer
  // dimensions to 0 and keep the existing subscripts for the rest.
  isl::map Map = isl::map::from_domain_and_range(
      domain: isl::set::universe(space: AccessSpace), range: isl::set::universe(space: ArraySpace));

  for (auto i : seq<unsigned>(Begin: 0, End: DimsMissing))
    Map = Map.fix_si(type: isl::dim::out, pos: i, value: 0);

  for (auto i : seq<unsigned>(Begin: DimsMissing, End: DimsArray))
    Map = Map.equate(type1: isl::dim::in, pos1: i - DimsMissing, type2: isl::dim::out, pos2: i);

  AccessRelation = AccessRelation.apply_range(map2: Map);

  // For the non delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the accesses element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(d: V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; Effect on unit tests is to make index
  // expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ElementSize: ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    assert(DimsArray >= 1);
    isl::map Map = isl::map::from_domain_and_range(
        domain: isl::set::universe(space: ArraySpace), range: isl::set::universe(space: ArraySpace));
    for (auto i : seq<unsigned>(Begin: 0, End: DimsArray - 1))
      Map = Map.equate(type1: isl::dim::in, pos1: i, type2: isl::dim::out, pos2: i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    // Constrain the innermost output dimension o to: in <= o <= in + Num - 1.
    C = isl::constraint::alloc_inequality(ls: LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(type: isl::dim::in, pos: DimsArray - 1, v: 1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: DimsArray - 1, v: -1);
    Map = Map.add_constraint(constraint: C);

    C = isl::constraint::alloc_inequality(ls: LS);
    C = C.set_coefficient_si(type: isl::dim::in, pos: DimsArray - 1, v: -1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: DimsArray - 1, v: 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(constraint: C);
    AccessRelation = AccessRelation.apply_range(map2: Map);
  }
}
529
530const std::string
531MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
532 switch (RT) {
533 case MemoryAccess::RT_NONE:
534 llvm_unreachable("Requested a reduction operator string for a memory "
535 "access which isn't a reduction");
536 case MemoryAccess::RT_ADD:
537 return "+";
538 case MemoryAccess::RT_MUL:
539 return "*";
540 case MemoryAccess::RT_BOR:
541 return "|";
542 case MemoryAccess::RT_BXOR:
543 return "^";
544 case MemoryAccess::RT_BAND:
545 return "&";
546 }
547 llvm_unreachable("Unknown reduction type");
548}
549
550const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
551 isl::id ArrayId = getArrayId();
552 void *User = ArrayId.get_user();
553 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
554 return SAI;
555}
556
557const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
558 isl::id ArrayId = getLatestArrayId();
559 void *User = ArrayId.get_user();
560 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
561 return SAI;
562}
563
564isl::id MemoryAccess::getOriginalArrayId() const {
565 return AccessRelation.get_tuple_id(type: isl::dim::out);
566}
567
568isl::id MemoryAccess::getLatestArrayId() const {
569 if (!hasNewAccessRelation())
570 return getOriginalArrayId();
571 return NewAccessRelation.get_tuple_id(type: isl::dim::out);
572}
573
574isl::map MemoryAccess::getAddressFunction() const {
575 return getAccessRelation().lexmin();
576}
577
578isl::pw_multi_aff
579MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
580 isl::map Schedule, ScheduledAccRel;
581 isl::union_set UDomain;
582
583 UDomain = getStatement()->getDomain();
584 USchedule = USchedule.intersect_domain(uset: UDomain);
585 Schedule = isl::map::from_union_map(umap: USchedule);
586 ScheduledAccRel = getAddressFunction().apply_domain(map2: Schedule);
587 return isl::pw_multi_aff::from_map(map: ScheduledAccRel);
588}
589
// The access relation as originally derived from the IR.
isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}

std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(Obj: AccessRelation);
}

isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}

// The updated access relation, if one was set (see hasNewAccessRelation()).
isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}

std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(Obj: NewAccessRelation);
}

// String form of the current (latest) access relation.
std::string MemoryAccess::getAccessRelationStr() const {
  return stringFromIslObj(Obj: getAccessRelation());
}
613
614isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
615 isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
616 Space = Space.align_params(space2: Statement->getDomainSpace());
617
618 return isl::basic_map::from_domain_and_range(
619 domain: isl::basic_set::universe(space: Statement->getDomainSpace()),
620 range: isl::basic_set::universe(space: Space));
621}
622
623// Formalize no out-of-bound access assumption
624//
625// When delinearizing array accesses we optimistically assume that the
626// delinearized accesses do not access out of bound locations (the subscript
627// expression of each array evaluates for each statement instance that is
628// executed to a value that is larger than zero and strictly smaller than the
629// size of the corresponding dimension). The only exception is the outermost
630// dimension for which we do not need to assume any upper bound. At this point
631// we formalize this assumption to ensure that at code generation time the
632// relevant run-time checks can be generated.
633//
634// To find the set of constraints necessary to avoid out of bound accesses, we
635// first build the set of data locations that are not within array bounds. We
636// then apply the reverse access relation to obtain the set of iterations that
637// may contain invalid accesses and reduce this set of iterations to the ones
638// that are actually executed by intersecting them with the domain of the
639// statement. If we now project out all loop dimensions, we obtain a set of
640// parameters that may cause statement instances to be executed that may
641// possibly yield out of bound memory accesses. The complement of these
642// constraints is the set of constraints that needs to be assumed to ensure such
643// statement instances are never executed.
// Compute the parameter constraints under which no statement instance can
// perform an out-of-bounds access (see the explanatory comment above).
isl::set MemoryAccess::assumeNoOutOfBound() {
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(space: Space);
  // Collect, for every dimension but the outermost (which needs no upper
  // bound), the array locations below zero or at/after the dimension size.
  for (int i = 1, Size = Space.dim(type: isl::dim::set).release(); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(ls: LS, type: isl::dim::set, pos: i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    isl::set DimOutside = Var.lt_set(pwaff2: Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(Dim: i);
    SizeE = SizeE.add_dims(type: isl::dim::in, n: Space.dim(type: isl::dim::set).release());
    SizeE = SizeE.set_tuple_id(type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
    DimOutside = DimOutside.unite(set2: SizeE.le_set(pwaff2: Var));

    Outside = Outside.unite(set2: DimOutside);
  }

  // Map the invalid locations back to the executed statement instances and
  // reduce them to the parameter values under which they may occur.
  Outside = Outside.apply(map: getAccessRelation().reverse());
  Outside = Outside.intersect(set2: Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
  // bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();

  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(context: Statement->getDomain().params());
  return Outside;
}
677
// Build the access relation for a memory intrinsic access.
//
// Subscripts[0] is the start offset and Subscripts[1] (if modeled) the
// length; the resulting relation covers the offsets [start, start + length).
// With an unknown length, all non-negative offsets from start onwards are
// assumed accessed.
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(E: Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(pwaff: SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    // Unknown length: overapproximate with the universe.
    LengthMap = isl::map::universe(space: SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(E: Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(pwaff: LengthPWA);
    isl::space RangeSpace = LengthMap.get_space().range();
    // Map the length L to every value strictly smaller than L.
    LengthMap = LengthMap.apply_range(map2: isl::map::lex_gt(set_space: RangeSpace));
  }
  // Offsets are non-negative relative to the start.
  LengthMap = LengthMap.lower_bound_si(type: isl::dim::out, pos: 0, value: 0);
  LengthMap = LengthMap.align_params(model: SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(model: LengthMap.get_space());
  // Shift the offset range by the start subscript.
  LengthMap = LengthMap.sum(map2: SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(type: isl::dim::in, id: getStatement()->getDomainId());
}
701
702void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
703 ScalarEvolution *SE = Statement->getParent()->getSE();
704
705 auto MAI = MemAccInst(getAccessInstruction());
706 if (isa<MemIntrinsic>(Val: MAI))
707 return;
708
709 Value *Ptr = MAI.getPointerOperand();
710 if (!Ptr || !SE->isSCEVable(Ty: Ptr->getType()))
711 return;
712
713 auto *PtrSCEV = SE->getSCEV(V: Ptr);
714 if (isa<SCEVCouldNotCompute>(Val: PtrSCEV))
715 return;
716
717 auto *BasePtrSCEV = SE->getPointerBase(V: PtrSCEV);
718 if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(Val: BasePtrSCEV))
719 PtrSCEV = SE->getMinusSCEV(LHS: PtrSCEV, RHS: BasePtrSCEV);
720
721 const ConstantRange &Range = SE->getSignedRange(S: PtrSCEV);
722 if (Range.isFullSet())
723 return;
724
725 if (Range.isUpperWrapped() || Range.isSignWrappedSet())
726 return;
727
728 bool isWrapping = Range.isSignWrappedSet();
729
730 unsigned BW = Range.getBitWidth();
731 const auto One = APInt(BW, 1);
732 const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
733 const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
734
735 auto Min = LB.sdiv(RHS: APInt(BW, ElementSize));
736 auto Max = UB.sdiv(RHS: APInt(BW, ElementSize)) + One;
737
738 assert(Min.sle(Max) && "Minimum expected to be less or equal than max");
739
740 isl::map Relation = AccessRelation;
741 isl::set AccessRange = Relation.range();
742 AccessRange = addRangeBoundsToSet(S: AccessRange, Range: ConstantRange(Min, Max), dim: 0,
743 type: isl::dim::set);
744 AccessRelation = Relation.intersect_range(set: AccessRange);
745}
746
// Fold subscripts of dimensions with parametric sizes back into bounds.
//
// For each dimension pair (i, i+1) where dimension i+1 has a parametric size
// p, index vectors with a negative entry at position i+1 are rewritten into
// the equivalent in-bounds form: (v_i, v_{i+1}) -> (v_i - 1, v_{i+1} + p).
// Nothing is done when the sizes are constant. The folded relation is only
// kept when it does not increase the number of disjuncts, unless
// -polly-precise-fold-accesses asks for precise folding.
void MemoryAccess::foldAccessRelation() {
  if (Sizes.size() < 2 || isa<SCEVConstant>(Val: Sizes[1]))
    return;

  int Size = Subscripts.size();

  isl::map NewAccessRelation = AccessRelation;

  // Process dimension pairs from innermost to outermost so carries propagate.
  for (int i = Size - 2; i >= 0; --i) {
    isl::space Space;
    isl::map MapOne, MapTwo;
    isl::pw_aff DimSize = getPwAff(E: Sizes[i + 1]);

    isl::space SpaceSize = DimSize.get_space();
    isl::id ParamId = SpaceSize.get_dim_id(type: isl::dim::param, pos: 0);

    Space = AccessRelation.get_space();
    Space = Space.range().map_from_set();
    Space = Space.align_params(space2: SpaceSize);

    int ParamLocation = Space.find_dim_by_id(type: isl::dim::param, id: ParamId);

    // MapOne: identity on vectors whose entry at i+1 is already non-negative.
    MapOne = isl::map::universe(space: Space);
    for (int j = 0; j < Size; ++j)
      MapOne = MapOne.equate(type1: isl::dim::in, pos1: j, type2: isl::dim::out, pos2: j);
    MapOne = MapOne.lower_bound_si(type: isl::dim::in, pos: i + 1, value: 0);

    // MapTwo: for a negative entry at i+1, borrow one from dimension i and
    // add the parametric size p at dimension i+1.
    MapTwo = isl::map::universe(space: Space);
    for (int j = 0; j < Size; ++j)
      if (j < i || j > i + 1)
        MapTwo = MapTwo.equate(type1: isl::dim::in, pos1: j, type2: isl::dim::out, pos2: j);

    isl::local_space LS(Space);
    isl::constraint C;
    // Equality: out_i = in_i - 1.
    C = isl::constraint::alloc_equality(ls: LS);
    C = C.set_constant_si(-1);
    C = C.set_coefficient_si(type: isl::dim::in, pos: i, v: 1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: i, v: -1);
    MapTwo = MapTwo.add_constraint(constraint: C);
    // Equality: out_{i+1} = in_{i+1} + p, restricted to in_{i+1} <= -1.
    C = isl::constraint::alloc_equality(ls: LS);
    C = C.set_coefficient_si(type: isl::dim::in, pos: i + 1, v: 1);
    C = C.set_coefficient_si(type: isl::dim::out, pos: i + 1, v: -1);
    C = C.set_coefficient_si(type: isl::dim::param, pos: ParamLocation, v: 1);
    MapTwo = MapTwo.add_constraint(constraint: C);
    MapTwo = MapTwo.upper_bound_si(type: isl::dim::in, pos: i + 1, value: -1);

    MapOne = MapOne.unite(map2: MapTwo);
    NewAccessRelation = NewAccessRelation.apply_range(map2: MapOne);
  }

  // Re-attach the statement and array tuple ids and simplify w.r.t. the
  // statement's domain.
  isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
  isl::space Space = Statement->getDomainSpace();
  NewAccessRelation = NewAccessRelation.set_tuple_id(
      type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
  NewAccessRelation = NewAccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
  NewAccessRelation = NewAccessRelation.gist_domain(context: Statement->getDomain());

  // Access dimension folding might in certain cases increase the number of
  // disjuncts in the memory access, which can possibly complicate the generated
  // run-time checks and can lead to costly compilation.
  // In that case the original relation is kept (the 'then' branch is
  // deliberately empty), unless precise folding was requested.
  if (!PollyPreciseFoldAccesses && NewAccessRelation.n_basic_map().release() >
                                       AccessRelation.n_basic_map().release()) {
  } else {
    AccessRelation = NewAccessRelation;
  }
}
813
// Build the access relation mapping this access's statement instances to the
// elements of array SAI that they touch. Handles three cases: memory
// intrinsics, non-affine (overapproximated) accesses, and affine subscripts.
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which the
  // access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(space: StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  // Memory intrinsics (memset/memcpy/memmove) have a dedicated builder.
  if (getAccessInstruction() && isa<MemIntrinsic>(Val: getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important to
    // differentiate between writes that must happen and writes that may happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);
    return;
  }

  // Affine case: start from a universe map over the iteration space and append
  // one output dimension per subscript expression.
  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(space: Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(E: Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(pwaff: Affine);
    AccessRelation = AccessRelation.flat_range_product(map2: SubscriptMap);
  }

  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      type: isl::dim::in, id: Space.get_tuple_id(type: isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(type: isl::dim::out, id: BaseAddrId);

  // Drop constraints already implied by the statement's domain.
  AccessRelation = AccessRelation.gist_domain(context: Statement->getDomain());
}
859
// Construct a MemoryAccess from an access instruction and its decomposed
// subscript/size information; the access relation itself is built later via
// buildAccessRelation().
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(),
      NewAccessRelation() {
  // Encode the access type plus the statement's current access count into the
  // id name so the id is unique within the statement.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(X: Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(ctx: Stmt->getParent()->getIslCtx(), name: IdName, user: this);
}
878
// Construct a MemoryAccess directly from an explicit access relation; the
// array properties are recovered from the ScopArrayInfo referenced by the
// relation's output tuple id.
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(), AccessRelation(), NewAccessRelation(AccRel) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(type: isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(Id: ArrayInfoId);
  // The outermost dimension carries no size expression; only inner ones do.
  Sizes.push_back(Elt: nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(Elt: SAI->getDimensionSize(Dim: i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  // Encode the access type plus the statement's current access count into the
  // id name so the id is unique within the statement.
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(X: Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(ctx: Stmt->getParent()->getIslCtx(), name: IdName, user: this);
}
895
// Out-of-line default destructor.
MemoryAccess::~MemoryAccess() = default;
897
898void MemoryAccess::realignParams() {
899 isl::set Ctx = Statement->getParent()->getContext();
900 InvalidDomain = InvalidDomain.gist_params(context: Ctx);
901 AccessRelation = AccessRelation.gist_params(context: Ctx);
902
903 // Predictable parameter order is required for JSON imports. Ensure alignment
904 // by explicitly calling align_params.
905 isl::space CtxSpace = Ctx.get_space();
906 InvalidDomain = InvalidDomain.align_params(model: CtxSpace);
907 AccessRelation = AccessRelation.align_params(model: CtxSpace);
908}
909
// Return the textual form of this access's reduction operator.
const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(RT: getReductionType());
}

// Return the isl id identifying this access inside isl objects.
isl::id MemoryAccess::getId() const { return Id; }
915
916raw_ostream &polly::operator<<(raw_ostream &OS,
917 MemoryAccess::ReductionType RT) {
918 if (RT == MemoryAccess::RT_NONE)
919 OS << "NONE";
920 else
921 OS << MemoryAccess::getReductionOperatorStr(RT);
922 return OS;
923}
924
// Print this access in the layout used by Polly's analysis output: the access
// kind header, reduction/scalar flags, the original relation, and (if present)
// the new access relation.
void MemoryAccess::print(raw_ostream &OS) const {
  switch (AccType) {
  case READ:
    OS.indent(NumSpaces: 12) << "ReadAccess :=\t";
    break;
  case MUST_WRITE:
    OS.indent(NumSpaces: 12) << "MustWriteAccess :=\t";
    break;
  case MAY_WRITE:
    OS.indent(NumSpaces: 12) << "MayWriteAccess :=\t";
    break;
  }

  OS << "[Reduction Type: " << getReductionType() << "] ";

  OS << "[Scalar: " << isScalarKind() << "]\n";
  OS.indent(NumSpaces: 16) << getOriginalAccessRelationStr() << ";\n";
  // Only accesses whose relation was replaced carry a "new:" line.
  if (hasNewAccessRelation())
    OS.indent(NumSpaces: 11) << "new: " << getNewAccessRelationStr() << ";\n";
}
945
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print this access to stderr.
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(OS&: errs()); }
#endif
949
// Translate the SCEV E into a piecewise-affine function for this access's
// statement. Iterations for which the translation is not valid (PWAC.second)
// are accumulated into InvalidDomain.
isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
  auto *Stmt = getStatement();
  PWACtx PWAC = Stmt->getParent()->getPwAff(E, BB: Stmt->getEntryBlock());
  isl::set StmtDom = getStatement()->getDomain();
  // Drop the tuple id so the domain intersects with the (presumably unnamed)
  // invalid set returned by getPwAff -- TODO confirm the space convention.
  StmtDom = StmtDom.reset_tuple_id();
  isl::set NewInvalidDom = StmtDom.intersect(set2: PWAC.second);
  InvalidDomain = InvalidDomain.unite(set2: NewInvalidDom);
  return PWAC.first;
}
959
960// Create a map in the size of the provided set domain, that maps from the
961// one element of the provided set domain to another element of the provided
962// set domain.
963// The mapping is limited to all points that are equal in all but the last
964// dimension and for which the last dimension of the input is strict smaller
965// than the last dimension of the output.
966//
967// getEqualAndLarger(set[i0, i1, ..., iX]):
968//
969// set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
970// : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
971//
static isl::map getEqualAndLarger(isl::space SetDomain) {
  isl::space Space = SetDomain.map_from_set();
  isl::map Map = isl::map::universe(space: Space);
  // NOTE(review): assumes the domain has at least one dimension; with zero
  // dimensions the unsigned subtraction would wrap -- confirm callers.
  unsigned lastDimension = Map.domain_tuple_dim().release() - 1;

  // Set all but the last dimension to be equal for the input and output
  //
  // input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
  //   : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
  for (unsigned i = 0; i < lastDimension; ++i)
    Map = Map.equate(type1: isl::dim::in, pos1: i, type2: isl::dim::out, pos2: i);

  // Set the last dimension of the input to be strict smaller than the
  // last dimension of the output.
  //
  // input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
  Map = Map.order_lt(type1: isl::dim::in, pos1: lastDimension, type2: isl::dim::out, pos2: lastDimension);
  return Map;
}
991
// Compute the set of distances between the array elements accessed by two
// schedule-consecutive instances of this access.
isl::set MemoryAccess::getStride(isl::map Schedule) const {
  isl::map AccessRelation = getAccessRelation();
  isl::space Space = Schedule.get_space().range();
  // Relate each schedule point to strictly later points in the same prefix.
  isl::map NextScatt = getEqualAndLarger(SetDomain: Space);

  Schedule = Schedule.reverse();
  // Keep only the immediately following schedule point.
  NextScatt = NextScatt.lexmin();

  // Translate the schedule-point relation into a relation between the array
  // elements accessed at those points.
  NextScatt = NextScatt.apply_range(map2: Schedule);
  NextScatt = NextScatt.apply_range(map2: AccessRelation);
  NextScatt = NextScatt.apply_domain(map2: Schedule);
  NextScatt = NextScatt.apply_domain(map2: AccessRelation);

  isl::set Deltas = NextScatt.deltas();
  return Deltas;
}
1008
1009bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1010 isl::set Stride, StrideX;
1011 bool IsStrideX;
1012
1013 Stride = getStride(Schedule);
1014 StrideX = isl::set::universe(space: Stride.get_space());
1015 int Size = unsignedFromIslSize(Size: StrideX.tuple_dim());
1016 for (auto i : seq<int>(Begin: 0, End: Size - 1))
1017 StrideX = StrideX.fix_si(type: isl::dim::set, pos: i, value: 0);
1018 StrideX = StrideX.fix_si(type: isl::dim::set, pos: Size - 1, value: StrideWidth);
1019 IsStrideX = Stride.is_subset(set2: StrideX);
1020
1021 return IsStrideX;
1022}
1023
// Stride zero: consecutive schedule iterations access the same element.
bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, StrideWidth: 0);
}

// Stride one: consecutive schedule iterations access consecutive elements.
bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, StrideWidth: 1);
}

// Overwrite the original access relation without any validity checking.
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}
1035
// Install a replacement access relation (e.g. from a JSON import). In asserts
// builds, verify the relation is compatible with the statement's domain and
// the accessed array before accepting it.
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(!NewAccess.is_null());

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    isl::set DefinedContext =
        getStatement()->getParent()->getBestKnownDefinedBehaviorContext();
    StmtDomain = StmtDomain.intersect_params(params: DefinedContext);
    isl::set NewDomain = NewAccess.domain();
    assert(!StmtDomain.is_subset(NewDomain).is_false() &&
           "Partial READ accesses not supported");
  }

  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  // The output tuple id carries the ScopArrayInfo as its user pointer.
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(type: isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            Val: SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to number of dimensions of the
  // accesses array.
  unsigned Dims = SAI->getNumberOfDimensions();
  unsigned SpaceSize = unsignedFromIslSize(Size: NewAccessSpace.dim(type: isl::dim::set));
  assert(SpaceSize == Dims && "Access dims must match array dims");
#endif

  // Drop constraints already implied by the scop context and the domain.
  NewAccess = NewAccess.gist_params(context: getStatement()->getParent()->getContext());
  NewAccess = NewAccess.gist_domain(context: getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}
1086
1087bool MemoryAccess::isLatestPartialAccess() const {
1088 isl::set StmtDom = getStatement()->getDomain();
1089 isl::set AccDom = getLatestAccessRelation().domain();
1090
1091 return !StmtDom.is_subset(set2: AccDom);
1092}
1093
1094//===----------------------------------------------------------------------===//
1095
1096isl::map ScopStmt::getSchedule() const {
1097 isl::set Domain = getDomain();
1098 if (Domain.is_empty())
1099 return isl::map::from_aff(aff: isl::aff(isl::local_space(getDomainSpace())));
1100 auto Schedule = getParent()->getSchedule();
1101 if (Schedule.is_null())
1102 return {};
1103 Schedule = Schedule.intersect_domain(uset: isl::union_set(Domain));
1104 if (Schedule.is_empty())
1105 return isl::map::from_aff(aff: isl::aff(isl::local_space(getDomainSpace())));
1106 isl::map M = M.from_union_map(umap: Schedule);
1107 M = M.coalesce();
1108 M = M.gist_domain(context: Domain);
1109 M = M.coalesce();
1110 return M;
1111}
1112
// Tighten the statement's domain; the new domain must not add iterations.
void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}
1118
1119void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
1120 Instruction *AccessInst = Access->getAccessInstruction();
1121
1122 if (Access->isArrayKind()) {
1123 MemoryAccessList &MAL = InstructionToAccess[AccessInst];
1124 MAL.emplace_front(args&: Access);
1125 } else if (Access->isValueKind() && Access->isWrite()) {
1126 Instruction *AccessVal = cast<Instruction>(Val: Access->getAccessValue());
1127 assert(!ValueWrites.lookup(AccessVal));
1128
1129 ValueWrites[AccessVal] = Access;
1130 } else if (Access->isValueKind() && Access->isRead()) {
1131 Value *AccessVal = Access->getAccessValue();
1132 assert(!ValueReads.lookup(AccessVal));
1133
1134 ValueReads[AccessVal] = Access;
1135 } else if (Access->isAnyPHIKind() && Access->isWrite()) {
1136 PHINode *PHI = cast<PHINode>(Val: Access->getAccessValue());
1137 assert(!PHIWrites.lookup(PHI));
1138
1139 PHIWrites[PHI] = Access;
1140 } else if (Access->isAnyPHIKind() && Access->isRead()) {
1141 PHINode *PHI = cast<PHINode>(Val: Access->getAccessValue());
1142 assert(!PHIReads.lookup(PHI));
1143
1144 PHIReads[PHI] = Access;
1145 }
1146
1147 if (Prepend) {
1148 MemAccs.insert(I: MemAccs.begin(), Elt: Access);
1149 return;
1150 }
1151 MemAccs.push_back(Elt: Access);
1152}
1153
// Simplify this statement's sets against the scop context and bring them into
// the scop's canonical parameter order; also realigns all memory accesses.
void ScopStmt::realignParams() {
  for (MemoryAccess *MA : *this)
    MA->realignParams();

  simplify(Set&: InvalidDomain);
  simplify(Set&: Domain);

  isl::set Ctx = Parent.getContext();
  InvalidDomain = InvalidDomain.gist_params(context: Ctx);
  Domain = Domain.gist_params(context: Ctx);

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  isl::space CtxSpace = Ctx.get_space();
  InvalidDomain = InvalidDomain.align_params(model: CtxSpace);
  Domain = Domain.align_params(model: CtxSpace);
}
1171
// Create a statement modeling a whole (non-affine) region of the scop.
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
    : Parent(parent), InvalidDomain(), Domain(), R(&R), Build(), BaseName(Name),
      SurroundingLoop(SurroundingLoop), Instructions(EntryBlockInstructions) {}

// Create a statement modeling a single basic block.
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
    : Parent(parent), InvalidDomain(), Domain(), BB(&bb), Build(),
      BaseName(Name), SurroundingLoop(SurroundingLoop),
      Instructions(Instructions) {}

// Create a copy statement that, for every point of NewDomain, reads via
// SourceRel and writes via TargetRel.
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
    : Parent(parent), InvalidDomain(), Domain(NewDomain), Build() {
  BaseName = getIslCompatibleName(Prefix: "CopyStmt_", Middle: "",
                                  Suffix: std::to_string(val: parent.getCopyStmtsNum()));
  isl::id Id = isl::id::alloc(ctx: getIslCtx(), name: getBaseName(), user: this);
  Domain = Domain.set_tuple_id(Id);
  TargetRel = TargetRel.set_tuple_id(type: isl::dim::in, id: Id);
  // NOTE(review): raw new -- addAccessFunction appears to take ownership of
  // the access; confirm against Scop::addAccessFunction.
  auto *Access =
      new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
  SourceRel = SourceRel.set_tuple_id(type: isl::dim::in, id: Id);
  Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
  parent.addAccessFunction(Access);
  addAccess(Access);
}
1202
ScopStmt::~ScopStmt() = default;

// Return the statement's domain as an isl string.
std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Obj: Domain); }

// Return the statement's schedule as an isl string.
std::string ScopStmt::getScheduleStr() const {
  return stringFromIslObj(Obj: getSchedule());
}

// Overwrite the set of iterations that are not modeled precisely.
void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }

// For block statements this is the block itself; for region statements the
// region's entry block.
BasicBlock *ScopStmt::getEntryBlock() const {
  if (isBlockStmt())
    return getBasicBlock();
  return getRegion()->getEntry();
}

unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

// Return the loop that induces the given dimension of the iteration domain.
Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

isl::set ScopStmt::getDomain() const { return Domain; }

isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }

isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1234
1235void ScopStmt::printInstructions(raw_ostream &OS) const {
1236 OS << "Instructions {\n";
1237
1238 for (Instruction *Inst : Instructions)
1239 OS.indent(NumSpaces: 16) << *Inst << "\n";
1240
1241 OS.indent(NumSpaces: 12) << "}\n";
1242}
1243
// Print the statement's name, domain, schedule, and all memory accesses;
// instruction printing is optional.
void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
  OS << "\t" << getBaseName() << "\n";
  OS.indent(NumSpaces: 12) << "Domain :=\n";

  if (!Domain.is_null()) {
    OS.indent(NumSpaces: 16) << getDomainStr() << ";\n";
  } else
    OS.indent(NumSpaces: 16) << "n/a\n";

  OS.indent(NumSpaces: 12) << "Schedule :=\n";

  // Without a domain there is no schedule to compute either.
  if (!Domain.is_null()) {
    OS.indent(NumSpaces: 16) << getScheduleStr() << ";\n";
  } else
    OS.indent(NumSpaces: 16) << "n/a\n";

  for (MemoryAccess *Access : MemAccs)
    Access->print(OS);

  if (PrintInstructions)
    printInstructions(OS&: OS.indent(NumSpaces: 12));
}
1266
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger aid: print this statement (with instructions) to the debug stream.
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(OS&: dbgs(), PrintInstructions: true); }
#endif
1270
// Erase MA from whichever scalar lookup table (value/PHI, read/write) it was
// registered in; asserts if the expected entry is missing.
void ScopStmt::removeAccessData(MemoryAccess *MA) {
  if (MA->isRead() && MA->isOriginalValueKind()) {
    bool Found = ValueReads.erase(Val: MA->getAccessValue());
    (void)Found; // Avoid unused-variable warnings in NDEBUG builds.
    assert(Found && "Expected access data not found");
  }
  if (MA->isWrite() && MA->isOriginalValueKind()) {
    bool Found = ValueWrites.erase(Val: cast<Instruction>(Val: MA->getAccessValue()));
    (void)Found;
    assert(Found && "Expected access data not found");
  }
  if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
    bool Found = PHIWrites.erase(Val: cast<PHINode>(Val: MA->getAccessInstruction()));
    (void)Found;
    assert(Found && "Expected access data not found");
  }
  if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
    bool Found = PHIReads.erase(Val: cast<PHINode>(Val: MA->getAccessInstruction()));
    (void)Found;
    assert(Found && "Expected access data not found");
  }
}
1293
1294void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1295 // Remove the memory accesses from this statement together with all scalar
1296 // accesses that were caused by it. MemoryKind::Value READs have no access
1297 // instruction, hence would not be removed by this function. However, it is
1298 // only used for invariant LoadInst accesses, its arguments are always affine,
1299 // hence synthesizable, and therefore there are no MemoryKind::Value READ
1300 // accesses to be removed.
1301 auto Predicate = [&](MemoryAccess *Acc) {
1302 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1303 };
1304 for (auto *MA : MemAccs) {
1305 if (Predicate(MA)) {
1306 removeAccessData(MA);
1307 Parent.removeAccessData(Access: MA);
1308 }
1309 }
1310 llvm::erase_if(C&: MemAccs, P: Predicate);
1311 InstructionToAccess.erase(Val: MA->getAccessInstruction());
1312}
1313
// Remove exactly one access. When AfterHoisting is false, the access is only
// detached from the instruction-to-access map and stays in MemAccs.
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
  if (AfterHoisting) {
    auto MAIt = std::find(first: MemAccs.begin(), last: MemAccs.end(), val: MA);
    assert(MAIt != MemAccs.end());
    MemAccs.erase(CI: MAIt);

    removeAccessData(MA);
    Parent.removeAccessData(Access: MA);
  }

  // Unlink MA from its instruction's access list; drop the list when empty.
  auto It = InstructionToAccess.find(Val: MA->getAccessInstruction());
  if (It != InstructionToAccess.end()) {
    It->second.remove(val: MA);
    if (It->second.empty())
      InstructionToAccess.erase(Val: MA->getAccessInstruction());
  }
}
1331
// Return the read access for value V in this statement, creating it (together
// with its backing ScopArrayInfo) on first use.
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
  MemoryAccess *Access = lookupInputAccessOf(Val: V);
  if (Access)
    return Access;

  ScopArrayInfo *SAI =
      Parent.getOrCreateScopArrayInfo(BasePtr: V, ElementType: V->getType(), Sizes: {}, Kind: MemoryKind::Value);
  // NOTE(review): raw new -- addAccessFunction appears to take ownership of
  // the access; confirm against Scop::addAccessFunction.
  Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
                            true, {}, {}, V, MemoryKind::Value);
  Parent.addAccessFunction(Access);
  Access->buildAccessRelation(SAI);
  addAccess(Access);
  Parent.addAccessData(Access);
  return Access;
}
1347
// Print a statement; whether instructions are included is controlled by the
// PollyPrintInstructions option.
raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
  S.print(OS, PrintInstructions: PollyPrintInstructions);
  return OS;
}
1352
1353//===----------------------------------------------------------------------===//
/// Scop class implementation
1355
// Replace the scop context, aligning the new set to the current parameter
// order so it stays compatible with the scop's other isl objects.
void Scop::setContext(isl::set NewContext) {
  Context = NewContext.align_params(model: Context.get_space());
}
1359
1360namespace {
1361
/// Remap parameter values but keep AddRecs valid wrt. invariant loads.
class SCEVSensitiveParameterRewriter final
    : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
  const ValueToValueMap &VMap; // Maps original values to their replacements.

public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
                                 ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), VMap(VMap) {}

  /// Rewrite E, substituting every SCEVUnknown whose value occurs in VMap.
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
                             const ValueToValueMap &VMap) {
    SCEVSensitiveParameterRewriter SSPR(VMap, SE);
    return SSPR.visit(S: E);
  }

  // Split the AddRec into start + {0,+,step} so the start expression is
  // rewritten independently of the recurrence itself.
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    auto *Start = visit(S: E->getStart());
    auto *AddRec = SE.getAddRecExpr(Start: SE.getConstant(Ty: E->getType(), V: 0),
                                    Step: visit(S: E->getStepRecurrence(SE)),
                                    L: E->getLoop(), Flags: SCEV::FlagAnyWrap);
    return SE.getAddExpr(LHS: Start, RHS: AddRec);
  }

  // Replace a value by its mapped counterpart, if any.
  const SCEV *visitUnknown(const SCEVUnknown *E) {
    if (auto *NewValue = VMap.lookup(Val: E->getValue()))
      return SE.getUnknown(V: NewValue);
    return E;
  }
};
1392
/// Check whether we should remap a SCEV expression.
///
/// Traverses an expression and reports whether any part of it is defined
/// inside the scop's region and not covered by the replacement map.
class SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
  const ValueToValueMap &VMap; // Values that will be remapped anyway.
  bool FoundInside = false;    // Set once something inside the scop is found.
  const Scop *S;

public:
  SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
                     const Scop *S)
      : SCEVTraversal(*this), VMap(VMap), S(S) {}

  /// Return true if E references something defined inside S (and not in VMap).
  static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
                         const ValueToValueMap &VMap, const Scop *S) {
    SCEVFindInsideScop SFIS(VMap, SE, S);
    SFIS.visitAll(Root: E);
    return SFIS.FoundInside;
  }

  bool follow(const SCEV *E) {
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Val: E)) {
      // AddRecs over loops contained in the region count as "inside".
      FoundInside |= S->getRegion().contains(L: AddRec->getLoop());
    } else if (auto *Unknown = dyn_cast<SCEVUnknown>(Val: E)) {
      // Instructions inside the region count, unless they will be remapped.
      if (Instruction *I = dyn_cast<Instruction>(Val: Unknown->getValue()))
        FoundInside |= S->getRegion().contains(Inst: I) && !VMap.count(Val: I);
    }
    // Stop descending once a match has been found.
    return !FoundInside;
  }

  bool isDone() { return FoundInside; }
};
1423} // end anonymous namespace
1424
// Rewrite E, replacing values by their invariant-load representatives from
// InvEquivClassVMap, unless E depends on something defined inside the scop.
const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
  // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
  // doesn't like addition between an AddRec and an expression that
  // doesn't have a dominance relationship with it.)
  if (SCEVFindInsideScop::hasVariant(E, SE&: *SE, VMap: InvEquivClassVMap, S: this))
    return E;

  // Rewrite SCEV.
  return SCEVSensitiveParameterRewriter::rewrite(E, SE&: *SE, VMap: InvEquivClassVMap);
}
1435
1436void Scop::createParameterId(const SCEV *Parameter) {
1437 assert(Parameters.count(Parameter));
1438 assert(!ParameterIds.count(Parameter));
1439
1440 std::string ParameterName = "p_" + std::to_string(val: getNumParams() - 1);
1441
1442 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Val: Parameter)) {
1443 Value *Val = ValueParameter->getValue();
1444
1445 if (UseInstructionNames) {
1446 // If this parameter references a specific Value and this value has a name
1447 // we use this name as it is likely to be unique and more useful than just
1448 // a number.
1449 if (Val->hasName())
1450 ParameterName = Val->getName().str();
1451 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1452 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1453 if (LoadOrigin->hasName()) {
1454 ParameterName += "_loaded_from_";
1455 ParameterName +=
1456 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1457 }
1458 }
1459 }
1460
1461 ParameterName = getIslCompatibleName(Prefix: "", Middle: ParameterName, Suffix: "");
1462 }
1463
1464 isl::id Id = isl::id::alloc(ctx: getIslCtx(), name: ParameterName,
1465 user: const_cast<void *>((const void *)Parameter));
1466 ParameterIds[Parameter] = Id;
1467}
1468
// Register new parameters, normalizing each SCEV first, and create an isl id
// for every parameter seen for the first time.
void Scop::addParams(const ParameterSetTy &NewParameters) {
  for (const SCEV *Parameter : NewParameters) {
    // Normalize the SCEV to get the representing element for an invariant load.
    Parameter = extractConstantFactor(M: Parameter, SE&: *SE).second;
    Parameter = getRepresentingInvariantLoadSCEV(E: Parameter);

    if (Parameters.insert(X: Parameter))
      createParameterId(Parameter);
  }
}

// Return the isl id for Parameter, or a null id if it was never registered.
isl::id Scop::getIdForParam(const SCEV *Parameter) const {
  // Normalize the SCEV to get the representing element for an invariant load.
  Parameter = getRepresentingInvariantLoadSCEV(E: Parameter);
  return ParameterIds.lookup(Val: Parameter);
}

// The scop is dominated by BB iff BB dominates the scop's entry block.
bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
  return DT.dominates(A: BB, B: getEntry());
}

// Initialize all context sets over an empty (zero-parameter) space; the
// actual parameter dimensions are added later.
void Scop::buildContext() {
  isl::space Space = isl::space::params_alloc(ctx: getIslCtx(), nparam: 0);
  Context = isl::set::universe(space: Space);
  InvalidContext = isl::set::empty(space: Space);
  AssumedContext = isl::set::universe(space: Space);
  DefinedBehaviorContext = isl::set::universe(space: Space);
}
1497
// Constrain the context with the signed value range ScalarEvolution derives
// for each parameter.
void Scop::addParameterBounds() {
  unsigned PDim = 0;
  for (auto *Parameter : Parameters) {
    ConstantRange SRange = SE->getSignedRange(S: Parameter);
    Context = addRangeBoundsToSet(S: Context, Range: SRange, dim: PDim++, type: isl::dim::param);
  }
  intersectDefinedBehavior(Set: Context, Sign: AS_ASSUMPTION);
}

// Bring all of the scop's sets and maps into one common parameter space and
// simplify them. Skipped entirely when PollyIgnoreParamBounds is set.
void Scop::realignParams() {
  if (PollyIgnoreParamBounds)
    return;

  // Add all parameters into a common model.
  isl::space Space = getFullParamSpace();

  // Align the parameters of all data structures to the model.
  Context = Context.align_params(model: Space);
  AssumedContext = AssumedContext.align_params(model: Space);
  InvalidContext = InvalidContext.align_params(model: Space);

  // As all parameters are known add bounds to them.
  addParameterBounds();

  for (ScopStmt &Stmt : *this)
    Stmt.realignParams();
  // Simplify the schedule according to the context too.
  Schedule = Schedule.gist_domain_params(context: getContext());

  // Predictable parameter order is required for JSON imports. Ensure alignment
  // by explicitly calling align_params.
  Schedule = Schedule.align_params(space: Space);
}
1531
// Remove constraints from AssumptionContext that are already implied by the
// statement domains (when safe) or by the scop's context.
static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
                                          const Scop &S) {
  // If we have modeled all blocks in the SCoP that have side effects we can
  // simplify the context with the constraints that are needed for anything to
  // be executed at all. However, if we have error blocks in the SCoP we already
  // assumed some parameter combinations cannot occur and removed them from the
  // domains, thus we cannot use the remaining domain to simplify the
  // assumptions.
  if (!S.hasErrorBlock()) {
    auto DomainParameters = S.getDomains().params();
    AssumptionContext = AssumptionContext.gist_params(context: DomainParameters);
  }

  AssumptionContext = AssumptionContext.gist_params(context: S.getContext());
  return AssumptionContext;
}

// Simplify the assumed, invalid, and defined-behavior contexts and realign
// them to the scop's parameter space.
void Scop::simplifyContexts() {
  // The parameter constraints of the iteration domains give us a set of
  // constraints that need to hold for all cases where at least a single
  // statement iteration is executed in the whole scop. We now simplify the
  // assumed context under the assumption that such constraints hold and at
  // least a single statement iteration is executed. For cases where no
  // statement instances are executed, the assumptions we have taken about
  // the executed code do not matter and can be changed.
  //
  // WARNING: This only holds if the assumptions we have taken do not reduce
  //          the set of statement instances that are executed. Otherwise we
  //          may run into a case where the iteration domains suggest that
  //          for a certain set of parameter constraints no code is executed,
  //          but in the original program some computation would have been
  //          performed. In such a case, modifying the run-time conditions and
  //          possibly influencing the run-time check may cause certain scops
  //          to not be executed.
  //
  // Example:
  //
  //   When delinearizing the following code:
  //
  //     for (long i = 0; i < 100; i++)
  //       for (long j = 0; j < m; j++)
  //         A[i+p][j] = 1.0;
  //
  //   we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
  //   otherwise we would access out of bound data. Now, knowing that code is
  //   only executed for the case m >= 0, it is sufficient to assume p >= 0.
  AssumedContext = simplifyAssumptionContext(AssumptionContext: AssumedContext, S: *this);
  InvalidContext = InvalidContext.align_params(model: getParamSpace());
  simplify(Set&: DefinedBehaviorContext);
  DefinedBehaviorContext = DefinedBehaviorContext.align_params(model: getParamSpace());
}
1583
// Return the domain conditions for a statement via its entry block.
isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
  return getDomainConditions(BB: Stmt->getEntryBlock());
}

// Return the domain recorded for BB; if none exists, walk up to the innermost
// enclosing region whose entry is not BB and use that entry's domain.
isl::set Scop::getDomainConditions(BasicBlock *BB) const {
  auto DIt = DomainMap.find(Val: BB);
  if (DIt != DomainMap.end())
    return DIt->getSecond();

  auto &RI = *R.getRegionInfo();
  auto *BBR = RI.getRegionFor(BB);
  while (BBR->getEntry() == BB)
    BBR = BBR->getParent();
  return getDomainConditions(BB: BBR->getEntry());
}
1599
// Construct a Scop for region R: allocate a private isl context, apply
// ISL-related command-line options, and initialize the context sets.
Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
           DominatorTree &DT, ScopDetection::DetectionContext &DC,
           OptimizationRemarkEmitter &ORE, int ID)
    : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
      R(R), name(std::nullopt), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
      ORE(ORE), Affinator(this, LI), ID(ID) {

  // Options defaults that are different from ISL's.
  isl_options_set_schedule_serialize_sccs(ctx: IslCtx.get(), val: true);

  SmallVector<char *, 8> IslArgv;
  IslArgv.reserve(N: 1 + IslArgs.size());

  // Substitute for program name.
  IslArgv.push_back(Elt: const_cast<char *>("-polly-isl-arg"));

  for (std::string &Arg : IslArgs)
    IslArgv.push_back(Elt: const_cast<char *>(Arg.c_str()));

  // Abort if unknown argument is passed.
  // Note that "-V" (print isl version) will always call exit(0), so we cannot
  // avoid ISL aborting the program at this point.
  unsigned IslParseFlags = ISL_ARG_ALL;

  isl_ctx_parse_options(ctx: IslCtx.get(), argc: IslArgv.size(), argv: IslArgv.data(),
                        flags: IslParseFlags);

  if (IslOnErrorAbort)
    isl_options_set_on_error(ctx: getIslCtx().get(), ISL_ON_ERROR_ABORT);
  buildContext();
}
1631
1632Scop::~Scop() = default;
1633
// Remove all references to @p Stmt from the block-to-statement and
// instruction-to-statement maps.
//
// For region statements, every block of the region is purged from StmtMap;
// for block statements, only @p Stmt is removed from its block's statement
// list (a block may host several statements).
void Scop::removeFromStmtMap(ScopStmt &Stmt) {
  for (Instruction *Inst : Stmt.getInstructions())
    InstStmtMap.erase(Val: Inst);

  if (Stmt.isRegionStmt()) {
    for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
      StmtMap.erase(Val: BB);
      // Skip entry basic block, as its instructions are already deleted as
      // part of the statement's instruction list.
      if (BB == Stmt.getEntryBlock())
        continue;
      for (Instruction &Inst : *BB)
        InstStmtMap.erase(Val: &Inst);
    }
  } else {
    // Only erase this statement from its block's list; other statements of
    // the same basic block must stay registered.
    auto StmtMapIt = StmtMap.find(Val: Stmt.getBasicBlock());
    if (StmtMapIt != StmtMap.end())
      llvm::erase(C&: StmtMapIt->second, V: &Stmt);
    for (Instruction *Inst : Stmt.getInstructions())
      InstStmtMap.erase(Val: Inst);
  }
}
1656
// Remove every statement for which @p ShouldDelete returns true.
//
// @param ShouldDelete Predicate selecting the statements to delete.
// @param AfterHoisting Whether invariant load hoisting already ran; this is
//        forwarded to the memory-access removal, which behaves differently
//        before and after hoisting.
void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
                       bool AfterHoisting) {
  for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
    if (!ShouldDelete(*StmtIt)) {
      StmtIt++;
      continue;
    }

    // Start with removing all of the statement's accesses including erasing it
    // from all maps that are pointing to them.
    // Make a temporary copy because removing MAs invalidates the iterator.
    SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
    for (MemoryAccess *MA : MAList)
      StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);

    // Unregister the statement, then erase it; erase returns the next
    // iterator so the loop remains valid.
    removeFromStmtMap(Stmt&: *StmtIt);
    StmtIt = Stmts.erase(position: StmtIt);
  }
}
1676
1677void Scop::removeStmtNotInDomainMap() {
1678 removeStmts(ShouldDelete: [this](ScopStmt &Stmt) -> bool {
1679 isl::set Domain = DomainMap.lookup(Val: Stmt.getEntryBlock());
1680 if (Domain.is_null())
1681 return true;
1682 return Domain.is_empty();
1683 });
1684}
1685
1686void Scop::simplifySCoP(bool AfterHoisting) {
1687 removeStmts(
1688 ShouldDelete: [AfterHoisting](ScopStmt &Stmt) -> bool {
1689 // Never delete statements that contain calls to debug functions.
1690 if (hasDebugCall(Stmt: &Stmt))
1691 return false;
1692
1693 bool RemoveStmt = Stmt.isEmpty();
1694
1695 // Remove read only statements only after invariant load hoisting.
1696 if (!RemoveStmt && AfterHoisting) {
1697 bool OnlyRead = true;
1698 for (MemoryAccess *MA : Stmt) {
1699 if (MA->isRead())
1700 continue;
1701
1702 OnlyRead = false;
1703 break;
1704 }
1705
1706 RemoveStmt = OnlyRead;
1707 }
1708 return RemoveStmt;
1709 },
1710 AfterHoisting);
1711}
1712
1713InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
1714 LoadInst *LInst = dyn_cast<LoadInst>(Val);
1715 if (!LInst)
1716 return nullptr;
1717
1718 if (Value *Rep = InvEquivClassVMap.lookup(Val: LInst))
1719 LInst = cast<LoadInst>(Val: Rep);
1720
1721 Type *Ty = LInst->getType();
1722 const SCEV *PointerSCEV = SE->getSCEV(V: LInst->getPointerOperand());
1723 for (auto &IAClass : InvariantEquivClasses) {
1724 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
1725 continue;
1726
1727 auto &MAs = IAClass.InvariantAccesses;
1728 for (auto *MA : MAs)
1729 if (MA->getAccessInstruction() == Val)
1730 return &IAClass;
1731 }
1732
1733 return nullptr;
1734}
1735
// Look up the ScopArrayInfo for @p BasePtr (or @p BaseName), creating it on
// first use.
//
// Exactly one of @p BasePtr and @p BaseName must be provided; named arrays
// (BaseName) live in a separate map from pointer-based ones. On repeated
// lookups the element type and dimension sizes are reconciled with the ones
// seen previously.
ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
                                              ArrayRef<const SCEV *> Sizes,
                                              MemoryKind Kind,
                                              const char *BaseName) {
  assert((BasePtr || BaseName) &&
         "BasePtr and BaseName can not be nullptr at the same time.");
  assert(!(BasePtr && BaseName) && "BaseName is redundant.");
  auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(x&: BasePtr, y&: Kind)]
                      : ScopArrayNameMap[BaseName];
  if (!SAI) {
    auto &DL = getFunction().getParent()->getDataLayout();
    SAI.reset(p: new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
                                DL, this, BaseName));
    ScopArrayInfoSet.insert(X: SAI.get());
  } else {
    SAI->updateElementType(NewElementType: ElementType);
    // In case of mismatching array sizes, we bail out by setting the run-time
    // context to false.
    if (!SAI->updateSizes(NewSizes: Sizes))
      invalidate(Kind: DELINEARIZATION, Loc: DebugLoc());
  }
  return SAI.get();
}
1759
1760ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1761 const std::string &BaseName,
1762 const std::vector<unsigned> &Sizes) {
1763 auto *DimSizeType = Type::getInt64Ty(C&: getSE()->getContext());
1764 std::vector<const SCEV *> SCEVSizes;
1765
1766 for (auto size : Sizes)
1767 if (size)
1768 SCEVSizes.push_back(x: getSE()->getConstant(Ty: DimSizeType, V: size, isSigned: false));
1769 else
1770 SCEVSizes.push_back(x: nullptr);
1771
1772 auto *SAI = getOrCreateScopArrayInfo(BasePtr: nullptr, ElementType, Sizes: SCEVSizes,
1773 Kind: MemoryKind::Array, BaseName: BaseName.c_str());
1774 return SAI;
1775}
1776
1777ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1778 auto *SAI = ScopArrayInfoMap[std::make_pair(x&: BasePtr, y&: Kind)].get();
1779 return SAI;
1780}
1781
1782ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
1783 auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
1784 assert(SAI && "No ScopArrayInfo available for this base pointer");
1785 return SAI;
1786}
1787
1788std::string Scop::getContextStr() const {
1789 return stringFromIslObj(Obj: getContext());
1790}
1791
1792std::string Scop::getAssumedContextStr() const {
1793 assert(!AssumedContext.is_null() && "Assumed context not yet built");
1794 return stringFromIslObj(Obj: AssumedContext);
1795}
1796
1797std::string Scop::getInvalidContextStr() const {
1798 return stringFromIslObj(Obj: InvalidContext);
1799}
1800
1801std::string Scop::getNameStr() const {
1802 std::string ExitName, EntryName;
1803 std::tie(args&: EntryName, args&: ExitName) = getEntryExitStr();
1804 return EntryName + "---" + ExitName;
1805}
1806
// Return printable names for the SCoP's entry and exit basic blocks.
//
// When the region has no exit block (it extends to the function's end), the
// fixed name "FunctionExit" is used for the exit.
std::pair<std::string, std::string> Scop::getEntryExitStr() const {
  std::string ExitName, EntryName;
  raw_string_ostream ExitStr(ExitName);
  raw_string_ostream EntryStr(EntryName);

  R.getEntry()->printAsOperand(O&: EntryStr, PrintType: false);
  // Flush the stream so EntryName is fully populated before use.
  EntryStr.str();

  if (R.getExit()) {
    R.getExit()->printAsOperand(O&: ExitStr, PrintType: false);
    ExitStr.str();
  } else
    ExitName = "FunctionExit";

  return std::make_pair(x&: EntryName, y&: ExitName);
}
1823
// Return the set of parameter constraints known to hold for this SCoP.
isl::set Scop::getContext() const { return Context; }

// Return the parameter space of the context set.
isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1827
1828isl::space Scop::getFullParamSpace() const {
1829
1830 isl::space Space = isl::space::params_alloc(ctx: getIslCtx(), nparam: ParameterIds.size());
1831
1832 unsigned PDim = 0;
1833 for (const SCEV *Parameter : Parameters) {
1834 isl::id Id = getIdForParam(Parameter);
1835 Space = Space.set_dim_id(type: isl::dim::param, pos: PDim++, id: Id);
1836 }
1837
1838 return Space;
1839}
1840
// Return the assumed context; only valid once it has been computed.
isl::set Scop::getAssumedContext() const {
  assert(!AssumedContext.is_null() && "Assumed context not yet built");
  return AssumedContext;
}
1845
1846bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
1847 if (PollyProcessUnprofitable)
1848 return true;
1849
1850 if (isEmpty())
1851 return false;
1852
1853 unsigned OptimizableStmtsOrLoops = 0;
1854 for (auto &Stmt : *this) {
1855 if (Stmt.getNumIterators() == 0)
1856 continue;
1857
1858 bool ContainsArrayAccs = false;
1859 bool ContainsScalarAccs = false;
1860 for (auto *MA : Stmt) {
1861 if (MA->isRead())
1862 continue;
1863 ContainsArrayAccs |= MA->isLatestArrayKind();
1864 ContainsScalarAccs |= MA->isLatestScalarKind();
1865 }
1866
1867 if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
1868 OptimizableStmtsOrLoops += Stmt.getNumIterators();
1869 }
1870
1871 return OptimizableStmtsOrLoops > 1;
1872}
1873
// Check whether a parameter valuation exists for which the runtime checks
// of this SCoP can succeed: the assumed context (restricted by the known
// context and the domain parameters) must be non-empty and must not be
// entirely contained in the invalid context.
bool Scop::hasFeasibleRuntimeContext() const {
  // A SCoP without statements is never executed.
  if (Stmts.empty())
    return false;

  isl::set PositiveContext = getAssumedContext();
  isl::set NegativeContext = getInvalidContext();
  PositiveContext = PositiveContext.intersect_params(params: Context);
  PositiveContext = PositiveContext.intersect_params(params: getDomains().params());
  // Note: is_false() on the isl tri-state also rejects the error state.
  return PositiveContext.is_empty().is_false() &&
         PositiveContext.is_subset(set2: NegativeContext).is_false();
}
1885
1886MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
1887 Value *PointerBase = MA->getOriginalBaseAddr();
1888
1889 auto *PointerBaseInst = dyn_cast<Instruction>(Val: PointerBase);
1890 if (!PointerBaseInst)
1891 return nullptr;
1892
1893 auto *BasePtrStmt = getStmtFor(Inst: PointerBaseInst);
1894 if (!BasePtrStmt)
1895 return nullptr;
1896
1897 return BasePtrStmt->getArrayAccessOrNULLFor(Inst: PointerBaseInst);
1898}
1899
1900static std::string toString(AssumptionKind Kind) {
1901 switch (Kind) {
1902 case ALIASING:
1903 return "No-aliasing";
1904 case INBOUNDS:
1905 return "Inbounds";
1906 case WRAPPING:
1907 return "No-overflows";
1908 case UNSIGNED:
1909 return "Signed-unsigned";
1910 case COMPLEXITY:
1911 return "Low complexity";
1912 case PROFITABLE:
1913 return "Profitable";
1914 case ERRORBLOCK:
1915 return "No-error";
1916 case INFINITELOOP:
1917 return "Finite loop";
1918 case INVARIANTLOAD:
1919 return "Invariant load";
1920 case DELINEARIZATION:
1921 return "Delinearization";
1922 }
1923 llvm_unreachable("Unknown AssumptionKind!");
1924}
1925
1926bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
1927 if (Sign == AS_ASSUMPTION) {
1928 if (Context.is_subset(set2: Set))
1929 return false;
1930
1931 if (AssumedContext.is_subset(set2: Set))
1932 return false;
1933 } else {
1934 if (Set.is_disjoint(set2: Context))
1935 return false;
1936
1937 if (Set.is_subset(set2: InvalidContext))
1938 return false;
1939 }
1940 return true;
1941}
1942
// Account for and report an assumption/restriction via optimization remarks.
//
// Returns true if the assumption is considered worth keeping. With
// -polly-remarks-minimal only effective (non-implied) assumptions are kept;
// otherwise trivial ones (empty restrictions, universe assumptions) are
// still filtered out to avoid cluttering the output.
bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                           AssumptionSign Sign, BasicBlock *BB) {
  if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
    return false;

  // Do never emit trivial assumptions as they only clutter the output.
  if (!PollyRemarksMinimal) {
    isl::set Univ;
    if (Sign == AS_ASSUMPTION)
      Univ = isl::set::universe(space: Set.get_space());

    bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
                     (Sign == AS_ASSUMPTION && Univ.is_equal(set2: Set));

    if (IsTrivial)
      return false;
  }

  // Bump the per-kind statistics counter.
  switch (Kind) {
  case ALIASING:
    AssumptionsAliasing++;
    break;
  case INBOUNDS:
    AssumptionsInbounds++;
    break;
  case WRAPPING:
    AssumptionsWrapping++;
    break;
  case UNSIGNED:
    AssumptionsUnsigned++;
    break;
  case COMPLEXITY:
    AssumptionsComplexity++;
    break;
  case PROFITABLE:
    AssumptionsUnprofitable++;
    break;
  case ERRORBLOCK:
    AssumptionsErrorBlock++;
    break;
  case INFINITELOOP:
    AssumptionsInfiniteLoop++;
    break;
  case INVARIANTLOAD:
    AssumptionsInvariantLoad++;
    break;
  case DELINEARIZATION:
    AssumptionsDelinearization++;
    break;
  }

  // Emit the remark at @p BB if given, otherwise at the region entry.
  auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
  std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Obj: Set);
  if (BB)
    ORE.emit(OptDiag&: OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
             << Msg);
  else
    ORE.emit(OptDiag&: OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
                                       R.getEntry())
             << Msg);
  return true;
}
2005
// Add an assumption (AS_ASSUMPTION) or restriction (AS_RESTRICTION) to the
// SCoP's runtime contexts.
//
// The set is always folded into the defined-behavior context; it only
// affects the assumed/invalid runtime-check contexts when @p RequiresRTC is
// true and the assumption is deemed worth tracking.
void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
                         AssumptionSign Sign, BasicBlock *BB,
                         bool RequiresRTC) {
  // Simplify the assumptions/restrictions first.
  Set = Set.gist_params(context: getContext());
  intersectDefinedBehavior(Set, Sign);

  if (!RequiresRTC)
    return;

  if (!trackAssumption(Kind, Set, Loc, Sign, BB))
    return;

  // Assumptions narrow the assumed context; restrictions widen the invalid
  // context.
  if (Sign == AS_ASSUMPTION)
    AssumedContext = AssumedContext.intersect(set2: Set).coalesce();
  else
    InvalidContext = InvalidContext.unite(set2: Set).coalesce();
}
2024
// Fold @p Set into the defined-behavior context.
//
// Assumptions intersect the context, restrictions are subtracted from it.
// If the context grows beyond the allowed number of disjuncts even after
// simplification, it is dropped entirely (set to null, i.e. unavailable).
void Scop::intersectDefinedBehavior(isl::set Set, AssumptionSign Sign) {
  if (DefinedBehaviorContext.is_null())
    return;

  if (Sign == AS_ASSUMPTION)
    DefinedBehaviorContext = DefinedBehaviorContext.intersect(set2: Set);
  else
    DefinedBehaviorContext = DefinedBehaviorContext.subtract(set2: Set);

  // Limit the complexity of the context. If complexity is exceeded, simplify
  // the set and check again.
  if (DefinedBehaviorContext.n_basic_set().release() >
      MaxDisjunktsInDefinedBehaviourContext) {
    simplify(Set&: DefinedBehaviorContext);
    if (DefinedBehaviorContext.n_basic_set().release() >
        MaxDisjunktsInDefinedBehaviourContext)
      DefinedBehaviorContext = {};
  }
}
2044
2045void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
2046 POLLY_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
2047 addAssumption(Kind, Set: isl::set::empty(space: getParamSpace()), Loc, Sign: AS_ASSUMPTION, BB);
2048}
2049
2050isl::set Scop::getInvalidContext() const { return InvalidContext; }
2051
// Print the known, assumed, invalid, and defined-behavior contexts, followed
// by a listing of all parameters. Output format is fixed; tests match it.
void Scop::printContext(raw_ostream &OS) const {
  OS << "Context:\n";
  OS.indent(NumSpaces: 4) << Context << "\n";

  OS.indent(NumSpaces: 4) << "Assumed Context:\n";
  OS.indent(NumSpaces: 4) << AssumedContext << "\n";

  OS.indent(NumSpaces: 4) << "Invalid Context:\n";
  OS.indent(NumSpaces: 4) << InvalidContext << "\n";

  // The defined-behavior context may have been dropped for complexity.
  OS.indent(NumSpaces: 4) << "Defined Behavior Context:\n";
  if (!DefinedBehaviorContext.is_null())
    OS.indent(NumSpaces: 4) << DefinedBehaviorContext << "\n";
  else
    OS.indent(NumSpaces: 4) << "<unavailable>\n";

  unsigned Dim = 0;
  for (const SCEV *Parameter : Parameters)
    OS.indent(NumSpaces: 4) << "p" << Dim++ << ": " << *Parameter << "\n";
}
2072
// Print the min/max alias groups used for runtime alias checks.
//
// Each group pairs non-read-only accesses (Pair.first) with read-only
// accesses (Pair.second); a group without read-only accesses counts (and
// prints) once, otherwise once per read-only access.
void Scop::printAliasAssumptions(raw_ostream &OS) const {
  int noOfGroups = 0;
  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
    if (Pair.second.size() == 0)
      noOfGroups += 1;
    else
      noOfGroups += Pair.second.size();
  }

  OS.indent(NumSpaces: 4) << "Alias Groups (" << noOfGroups << "):\n";
  if (MinMaxAliasGroups.empty()) {
    OS.indent(NumSpaces: 8) << "n/a\n";
    return;
  }

  for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {

    // If the group has no read only accesses print the write accesses.
    if (Pair.second.empty()) {
      OS.indent(NumSpaces: 8) << "[[";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }

    // Otherwise emit one line per read-only access, pairing it with all
    // non-read-only accesses of the group.
    for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
      OS.indent(NumSpaces: 8) << "[[";
      OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
      for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
        OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
           << ">";
      }
      OS << " ]]\n";
    }
  }
}
2111
2112void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
2113 OS << "Statements {\n";
2114
2115 for (const ScopStmt &Stmt : *this) {
2116 OS.indent(NumSpaces: 4);
2117 Stmt.print(OS, PrintInstructions);
2118 }
2119
2120 OS.indent(NumSpaces: 4) << "}\n";
2121}
2122
2123void Scop::printArrayInfo(raw_ostream &OS) const {
2124 OS << "Arrays {\n";
2125
2126 for (auto &Array : arrays())
2127 Array->print(OS);
2128
2129 OS.indent(NumSpaces: 4) << "}\n";
2130
2131 OS.indent(NumSpaces: 4) << "Arrays (Bounds as pw_affs) {\n";
2132
2133 for (auto &Array : arrays())
2134 Array->print(OS, /* SizeAsPwAff */ true);
2135
2136 OS.indent(NumSpaces: 4) << "}\n";
2137}
2138
// Print a full textual description of this SCoP: header, invariant access
// classes, contexts, arrays, alias groups, and statements.
void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
  OS.indent(NumSpaces: 4) << "Function: " << getFunction().getName() << "\n";
  OS.indent(NumSpaces: 4) << "Region: " << getNameStr() << "\n";
  OS.indent(NumSpaces: 4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
  OS.indent(NumSpaces: 4) << "Invariant Accesses: {\n";
  for (const auto &IAClass : InvariantEquivClasses) {
    const auto &MAs = IAClass.InvariantAccesses;
    // Classes without member accesses print only their identifying pointer.
    if (MAs.empty()) {
      OS.indent(NumSpaces: 12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
    } else {
      MAs.front()->print(OS);
      OS.indent(NumSpaces: 12) << "Execution Context: " << IAClass.ExecutionContext
                    << "\n";
    }
  }
  OS.indent(NumSpaces: 4) << "}\n";
  printContext(OS&: OS.indent(NumSpaces: 4));
  printArrayInfo(OS&: OS.indent(NumSpaces: 4));
  printAliasAssumptions(OS);
  printStatements(OS&: OS.indent(NumSpaces: 4), PrintInstructions);
}
2160
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger convenience: print the SCoP (with instructions) to stderr.
LLVM_DUMP_METHOD void Scop::dump() const { print(OS&: dbgs(), PrintInstructions: true); }
#endif

// Return the isl context owned by this SCoP.
isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2166
// Translate the SCEV @p E, evaluated in basic block @p BB, into a piecewise
// affine function plus its validity constraints.
//
// @param E                   The scalar-evolution expression to translate.
// @param BB                  The block providing the evaluation context (may
//                            be null).
// @param NonNegative         If set, assume the expression is non-negative.
// @param RecordedAssumptions Optional sink for assumptions taken during the
//                            translation.
__isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
                                 bool NonNegative,
                                 RecordedAssumptionsTy *RecordedAssumptions) {
  // First try to use the SCEVAffinator to generate a piecewise defined
  // affine function from @p E in the context of @p BB. If that tasks becomes to
  // complex the affinator might return a nullptr. In such a case we invalidate
  // the SCoP and return a dummy value. This way we do not need to add error
  // handling code to all users of this function.
  auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
  if (!PWAC.first.is_null()) {
    // TODO: We could use a heuristic and either use:
    //         SCEVAffinator::takeNonNegativeAssumption
    //       or
    //         SCEVAffinator::interpretAsUnsigned
    //       to deal with unsigned or "NonNegative" SCEVs.
    if (NonNegative)
      Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
    return PWAC;
  }

  // Translation failed: invalidate the SCoP and return a zero expression so
  // callers can proceed without error handling.
  auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
  invalidate(Kind: COMPLEXITY, Loc: DL, BB);
  return Affinator.getPwAff(E: SE->getZero(Ty: E->getType()), BB, RecordedAssumptions);
}
2191
2192isl::union_set Scop::getDomains() const {
2193 isl_space *EmptySpace = isl_space_params_alloc(ctx: getIslCtx().get(), nparam: 0);
2194 isl_union_set *Domain = isl_union_set_empty(space: EmptySpace);
2195
2196 for (const ScopStmt &Stmt : *this)
2197 Domain = isl_union_set_add_set(uset: Domain, set: Stmt.getDomain().release());
2198
2199 return isl::manage(ptr: Domain);
2200}
2201
2202isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2203 RecordedAssumptionsTy *RecordedAssumptions) {
2204 PWACtx PWAC = getPwAff(E, BB, NonNegative: RecordedAssumptions);
2205 return PWAC.first;
2206}
2207
2208isl::union_map
2209Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
2210 isl::union_map Accesses = isl::union_map::empty(ctx: getIslCtx());
2211
2212 for (ScopStmt &Stmt : *this) {
2213 for (MemoryAccess *MA : Stmt) {
2214 if (!Predicate(*MA))
2215 continue;
2216
2217 isl::set Domain = Stmt.getDomain();
2218 isl::map AccessDomain = MA->getAccessRelation();
2219 AccessDomain = AccessDomain.intersect_domain(set: Domain);
2220 Accesses = Accesses.unite(umap2: AccessDomain);
2221 }
2222 }
2223
2224 return Accesses.coalesce();
2225}
2226
// Union map of all must-write accesses, restricted to statement domains.
isl::union_map Scop::getMustWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isMustWrite(); });
}

// Union map of all may-write accesses.
isl::union_map Scop::getMayWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isMayWrite(); });
}

// Union map of all write accesses (must and may).
isl::union_map Scop::getWrites() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isWrite(); });
}

// Union map of all read accesses.
isl::union_map Scop::getReads() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return MA.isRead(); });
}

// Union map of every access in the SCoP.
isl::union_map Scop::getAccesses() {
  return getAccessesOfType(Predicate: [](MemoryAccess &MA) { return true; });
}

// Union map of all accesses touching the given array.
isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
  return getAccessesOfType(
      Predicate: [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
}
2251
2252isl::union_map Scop::getSchedule() const {
2253 auto Tree = getScheduleTree();
2254 return Tree.get_map();
2255}
2256
2257isl::schedule Scop::getScheduleTree() const {
2258 return Schedule.intersect_domain(domain: getDomains());
2259}
2260
// Replace the schedule with the flat union map @p NewSchedule.
//
// The map is converted into a schedule tree consisting of a single band
// over the current domains. Marks the schedule as modified.
void Scop::setSchedule(isl::union_map NewSchedule) {
  auto S = isl::schedule::from_domain(domain: getDomains());
  Schedule = S.insert_partial_schedule(
      partial: isl::multi_union_pw_aff::from_union_map(umap: NewSchedule));
  ScheduleModified = true;
}

// Install @p NewSchedule as-is and mark the schedule as modified.
void Scop::setScheduleTree(isl::schedule NewSchedule) {
  Schedule = NewSchedule;
  ScheduleModified = true;
}
2272
// Intersect every statement's domain with @p Domain.
//
// Returns true if at least one statement domain actually shrank. Statements
// whose intersection becomes empty keep an explicitly empty domain in their
// original space (restrictDomain requires a set of the right space).
bool Scop::restrictDomains(isl::union_set Domain) {
  bool Changed = false;
  for (ScopStmt &Stmt : *this) {
    isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
    isl::union_set NewStmtDomain = StmtDomain.intersect(uset2: Domain);

    // If the intersection does not remove anything, leave the domain alone.
    if (StmtDomain.is_subset(uset2: NewStmtDomain))
      continue;

    Changed = true;

    NewStmtDomain = NewStmtDomain.coalesce();

    if (NewStmtDomain.is_empty())
      Stmt.restrictDomain(NewDomain: isl::set::empty(space: Stmt.getDomainSpace()));
    else
      Stmt.restrictDomain(NewDomain: isl::set(NewStmtDomain));
  }
  return Changed;
}
2293
2294ScalarEvolution *Scop::getSE() const { return SE; }
2295
// Create a new block statement for @p BB covering @p Instructions and
// register it in the block and instruction lookup maps.
void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(BB && "Unexpected nullptr!");
  Stmts.emplace_back(args&: *this, args&: *BB, args&: Name, args&: SurroundingLoop, args&: Instructions);
  // Stmts is a node-based list, so the address of the new element is stable.
  auto *Stmt = &Stmts.back();
  StmtMap[BB].push_back(x: Stmt);
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }
}
2308
// Create a new region statement for @p R and register it for every block of
// the region and for all contained instructions.
void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(R && "Unexpected nullptr!");
  Stmts.emplace_back(args&: *this, args&: *R, args&: Name, args&: SurroundingLoop, args&: Instructions);
  auto *Stmt = &Stmts.back();

  // Instructions of the entry block are covered by the explicit list.
  for (Instruction *Inst : Instructions) {
    assert(!InstStmtMap.count(Inst) &&
           "Unexpected statement corresponding to the instruction.");
    InstStmtMap[Inst] = Stmt;
  }

  for (BasicBlock *BB : R->blocks()) {
    StmtMap[BB].push_back(x: Stmt);
    // The entry block was handled above via the instruction list.
    if (BB == R->getEntry())
      continue;
    for (Instruction &Inst : *BB) {
      assert(!InstStmtMap.count(&Inst) &&
             "Unexpected statement corresponding to the instruction.");
      InstStmtMap[&Inst] = Stmt;
    }
  }
}
2332
// Create a copy statement that reads via @p SourceRel and writes via
// @p TargetRel over @p Domain; returns the newly created statement.
ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
                            isl::set Domain) {
#ifndef NDEBUG
  // Both access relations must be defined on the whole statement domain.
  isl::set SourceDomain = SourceRel.domain();
  isl::set TargetDomain = TargetRel.domain();
  assert(Domain.is_subset(TargetDomain) &&
         "Target access not defined for complete statement domain");
  assert(Domain.is_subset(SourceDomain) &&
         "Source access not defined for complete statement domain");
#endif
  Stmts.emplace_back(args&: *this, args&: SourceRel, args&: TargetRel, args&: Domain);
  CopyStmtsNum++;
  return &(Stmts.back());
}
2347
2348ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2349 auto StmtMapIt = StmtMap.find(Val: BB);
2350 if (StmtMapIt == StmtMap.end())
2351 return {};
2352 return StmtMapIt->second;
2353}
2354
// Determine the statement that provides the value flowing into the PHI use
// @p U along its incoming edge.
ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
  auto *PHI = cast<PHINode>(Val: U.getUser());
  BasicBlock *IncomingBB = PHI->getIncomingBlock(U);

  // If the value is a non-synthesizable from the incoming block, use the
  // statement that contains it as user statement.
  if (auto *IncomingInst = dyn_cast<Instruction>(Val: U.get())) {
    if (IncomingInst->getParent() == IncomingBB) {
      if (ScopStmt *IncomingStmt = getStmtFor(Inst: IncomingInst))
        return IncomingStmt;
    }
  }

  // Otherwise, use the epilogue/last statement.
  return getLastStmtFor(BB: IncomingBB);
}
2371
2372ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2373 ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2374 if (!StmtList.empty())
2375 return StmtList.back();
2376 return nullptr;
2377}
2378
2379ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
2380 if (RN->isSubRegion())
2381 return getStmtListFor(R: RN->getNodeAs<Region>());
2382 return getStmtListFor(BB: RN->getNodeAs<BasicBlock>());
2383}
2384
2385ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
2386 return getStmtListFor(BB: R->getEntry());
2387}
2388
2389int Scop::getRelativeLoopDepth(const Loop *L) const {
2390 if (!L || !R.contains(L))
2391 return -1;
2392 // outermostLoopInRegion always returns nullptr for top level regions
2393 if (R.isTopLevelRegion()) {
2394 // LoopInfo's depths start at 1, we start at 0
2395 return L->getLoopDepth() - 1;
2396 } else {
2397 Loop *OuterLoop = R.outermostLoopInRegion(L: const_cast<Loop *>(L));
2398 assert(OuterLoop);
2399 return L->getLoopDepth() - OuterLoop->getLoopDepth();
2400 }
2401}
2402
2403ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2404 for (auto &SAI : arrays()) {
2405 if (SAI->getName() == BaseName)
2406 return SAI;
2407 }
2408 return nullptr;
2409}
2410
2411void Scop::addAccessData(MemoryAccess *Access) {
2412 const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
2413 assert(SAI && "can only use after access relations have been constructed");
2414
2415 if (Access->isOriginalValueKind() && Access->isRead())
2416 ValueUseAccs[SAI].push_back(Elt: Access);
2417 else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
2418 PHIIncomingAccs[SAI].push_back(Elt: Access);
2419}
2420
// Unregister @p Access from the scalar/PHI lookup tables it was indexed in.
// The four cases mirror the registrations done by addAccessData() and the
// builder: value writes, value reads, PHI reads, and PHI-incoming writes.
void Scop::removeAccessData(MemoryAccess *Access) {
  if (Access->isOriginalValueKind() && Access->isWrite()) {
    ValueDefAccs.erase(Val: Access->getAccessValue());
  } else if (Access->isOriginalValueKind() && Access->isRead()) {
    auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
    llvm::erase(C&: Uses, V: Access);
  } else if (Access->isOriginalPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Val: Access->getAccessInstruction());
    PHIReadAccs.erase(Val: PHI);
  } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
    auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
    llvm::erase(C&: Incomings, V: Access);
  }
}
2435
2436MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2437 assert(SAI->isValueKind());
2438
2439 Instruction *Val = dyn_cast<Instruction>(Val: SAI->getBasePtr());
2440 if (!Val)
2441 return nullptr;
2442
2443 return ValueDefAccs.lookup(Val);
2444}
2445
2446ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2447 assert(SAI->isValueKind());
2448 auto It = ValueUseAccs.find(Val: SAI);
2449 if (It == ValueUseAccs.end())
2450 return {};
2451 return It->second;
2452}
2453
2454MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2455 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2456
2457 if (SAI->isExitPHIKind())
2458 return nullptr;
2459
2460 PHINode *PHI = cast<PHINode>(Val: SAI->getBasePtr());
2461 return PHIReadAccs.lookup(Val: PHI);
2462}
2463
2464ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2465 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2466 auto It = PHIIncomingAccs.find(Val: SAI);
2467 if (It == PHIIncomingAccs.end())
2468 return {};
2469 return It->second;
2470}
2471
// Check whether the value computed by @p Inst is used outside of the SCoP
// (and therefore needs to be made available after code generation).
bool Scop::isEscaping(Instruction *Inst) {
  assert(contains(Inst) && "The concept of escaping makes only sense for "
                           "values defined inside the SCoP");

  for (Use &Use : Inst->uses()) {
    BasicBlock *UserBB = getUseBlock(U: Use);
    if (!contains(BB: UserBB))
      return true;

    // When the SCoP region exit needs to be simplified, PHIs in the region exit
    // move to a new basic block such that its incoming blocks are not in the
    // SCoP anymore.
    if (hasSingleExitEdge() && isa<PHINode>(Val: Use.getUser()) &&
        isExit(BB: cast<PHINode>(Val: Use.getUser())->getParent()))
      return true;
  }
  return false;
}
2490
// Bump the aliasing-assumption statistic by @p step (used when several
// assumptions are taken at once, e.g. per alias group).
void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
  AssumptionsAliasing += step;
}
2494
// Collect per-SCoP statistics (loop counts and write-access classification).
// Only populated in builds with assertions or statistics enabled; otherwise
// a zero-initialized result is returned.
Scop::ScopStatistics Scop::getStatistics() const {
  ScopStatistics Result;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  auto LoopStat = ScopDetection::countBeneficialLoops(R: &R, SE&: *SE, LI&: *getLI(), MinProfitableTrips: 0);

  // Affine loops are all loops minus the over-approximated (boxed) ones.
  int NumTotalLoops = LoopStat.NumLoops;
  Result.NumBoxedLoops = getBoxedLoops().size();
  Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;

  for (const ScopStmt &Stmt : *this) {
    isl::set Domain = Stmt.getDomain().intersect_params(params: getContext());
    bool IsInLoop = Stmt.getNumIterators() >= 1;
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isWrite())
        continue;

      if (MA->isLatestValueKind()) {
        Result.NumValueWrites += 1;
        if (IsInLoop)
          Result.NumValueWritesInLoops += 1;
      }

      if (MA->isLatestAnyPHIKind()) {
        Result.NumPHIWrites += 1;
        if (IsInLoop)
          Result.NumPHIWritesInLoops += 1;
      }

      // A write is a singleton write if it touches exactly one element over
      // the statement's (context-restricted) domain.
      isl::set AccSet =
          MA->getAccessRelation().intersect_domain(set: Domain).range();
      if (AccSet.is_singleton()) {
        Result.NumSingletonWrites += 1;
        if (IsInLoop)
          Result.NumSingletonWritesInLoops += 1;
      }
    }
  }
#endif
  return Result;
}
2535
2536raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
2537 scop.print(OS, PrintInstructions: PollyPrintInstructions);
2538 return OS;
2539}
2540
2541//===----------------------------------------------------------------------===//
// Declare the analyses this pass depends on; it modifies nothing itself.
void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  // Transitive: the constructed Scop keeps referencing these analyses.
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2553
2554void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
2555 Scop::ScopStatistics ScopStats) {
2556 assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);
2557
2558 NumScops++;
2559 NumLoopsInScop += Stats.NumLoops;
2560 MaxNumLoopsInScop =
2561 std::max(a: MaxNumLoopsInScop.getValue(), b: (uint64_t)Stats.NumLoops);
2562
2563 if (Stats.MaxDepth == 0)
2564 NumScopsDepthZero++;
2565 else if (Stats.MaxDepth == 1)
2566 NumScopsDepthOne++;
2567 else if (Stats.MaxDepth == 2)
2568 NumScopsDepthTwo++;
2569 else if (Stats.MaxDepth == 3)
2570 NumScopsDepthThree++;
2571 else if (Stats.MaxDepth == 4)
2572 NumScopsDepthFour++;
2573 else if (Stats.MaxDepth == 5)
2574 NumScopsDepthFive++;
2575 else
2576 NumScopsDepthLarger++;
2577
2578 NumAffineLoops += ScopStats.NumAffineLoops;
2579 NumBoxedLoops += ScopStats.NumBoxedLoops;
2580
2581 NumValueWrites += ScopStats.NumValueWrites;
2582 NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
2583 NumPHIWrites += ScopStats.NumPHIWrites;
2584 NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
2585 NumSingletonWrites += ScopStats.NumSingletonWrites;
2586 NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
2587}
2588
2589bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
2590 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2591
2592 if (!SD.isMaxRegionInScop(R: *R))
2593 return false;
2594
2595 Function *F = R->getEntry()->getParent();
2596 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2597 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2598 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2599 auto const &DL = F->getParent()->getDataLayout();
2600 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2601 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F&: *F);
2602 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2603
2604 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2605 S = SB.getScop(); // take ownership of scop object
2606
2607#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2608 if (S) {
2609 ScopDetection::LoopStats Stats =
2610 ScopDetection::countBeneficialLoops(R: &S->getRegion(), SE, LI, MinProfitableTrips: 0);
2611 updateLoopCountStatistic(Stats, ScopStats: S->getStatistics());
2612 }
2613#endif
2614
2615 return false;
2616}
2617
2618void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2619 if (S)
2620 S->print(OS, PrintInstructions: PollyPrintInstructions);
2621 else
2622 OS << "Invalid Scop!\n";
2623}
2624
// Unique pass identifier; the legacy pass manager keys off its address.
char ScopInfoRegionPass::ID = 0;

// Factory used by Polly's pass registration machinery.
Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }
2628
// Legacy pass-manager registration for ScopInfoRegionPass, together with the
// analyses it depends on (must match getAnalysisUsage above).
INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
2642
2643//===----------------------------------------------------------------------===//
2644
2645namespace {
2646
2647/// Print result from ScopInfoRegionPass.
2648class ScopInfoPrinterLegacyRegionPass final : public RegionPass {
2649public:
2650 static char ID;
2651
2652 ScopInfoPrinterLegacyRegionPass() : ScopInfoPrinterLegacyRegionPass(outs()) {}
2653
2654 explicit ScopInfoPrinterLegacyRegionPass(llvm::raw_ostream &OS)
2655 : RegionPass(ID), OS(OS) {}
2656
2657 bool runOnRegion(Region *R, RGPassManager &RGM) override {
2658 ScopInfoRegionPass &P = getAnalysis<ScopInfoRegionPass>();
2659
2660 OS << "Printing analysis '" << P.getPassName() << "' for region: '"
2661 << R->getNameStr() << "' in function '"
2662 << R->getEntry()->getParent()->getName() << "':\n";
2663 P.print(OS);
2664
2665 return false;
2666 }
2667
2668 void getAnalysisUsage(AnalysisUsage &AU) const override {
2669 RegionPass::getAnalysisUsage(AU);
2670 AU.addRequired<ScopInfoRegionPass>();
2671 AU.setPreservesAll();
2672 }
2673
2674private:
2675 llvm::raw_ostream &OS;
2676};
2677
2678char ScopInfoPrinterLegacyRegionPass::ID = 0;
2679} // namespace
2680
// Factory for the region-level Scop printer pass; @p OS receives the output.
Pass *polly::createScopInfoPrinterLegacyRegionPass(raw_ostream &OS) {
  return new ScopInfoPrinterLegacyRegionPass(OS);
}
2684
// Legacy pass-manager registration for the region-level Scop printer.
INITIALIZE_PASS_BEGIN(ScopInfoPrinterLegacyRegionPass, "polly-print-scops",
                      "Polly - Print polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(ScopInfoRegionPass);
INITIALIZE_PASS_END(ScopInfoPrinterLegacyRegionPass, "polly-print-scops",
                    "Polly - Print polyhedral description of Scops", false,
                    false)
2692
2693//===----------------------------------------------------------------------===//
2694
/// Construct a ScopInfo: store references to all analyses ScopBuilder needs
/// and immediately build the Scops for the current function via recompute().
ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}
2701
2702void ScopInfo::recompute() {
2703 RegionToScopMap.clear();
2704 /// Create polyhedral description of scops for all the valid regions of a
2705 /// function.
2706 for (auto &It : SD) {
2707 Region *R = const_cast<Region *>(It);
2708 if (!SD.isMaxRegionInScop(R: *R))
2709 continue;
2710
2711 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2712 std::unique_ptr<Scop> S = SB.getScop();
2713 if (!S)
2714 continue;
2715#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2716 ScopDetection::LoopStats Stats =
2717 ScopDetection::countBeneficialLoops(R: &S->getRegion(), SE, LI, MinProfitableTrips: 0);
2718 updateLoopCountStatistic(Stats, ScopStats: S->getStatistics());
2719#endif
2720 bool Inserted = RegionToScopMap.insert(KV: {R, std::move(S)}).second;
2721 assert(Inserted && "Building Scop for the same region twice!");
2722 (void)Inserted;
2723 }
2724}
2725
2726bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
2727 FunctionAnalysisManager::Invalidator &Inv) {
2728 // Check whether the analysis, all analyses on functions have been preserved
2729 // or anything we're holding references to is being invalidated
2730 auto PAC = PA.getChecker<ScopInfoAnalysis>();
2731 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2732 Inv.invalidate<ScopAnalysis>(IR&: F, PA) ||
2733 Inv.invalidate<ScalarEvolutionAnalysis>(IR&: F, PA) ||
2734 Inv.invalidate<LoopAnalysis>(IR&: F, PA) ||
2735 Inv.invalidate<AAManager>(IR&: F, PA) ||
2736 Inv.invalidate<DominatorTreeAnalysis>(IR&: F, PA) ||
2737 Inv.invalidate<AssumptionAnalysis>(IR&: F, PA);
2738}
2739
2740AnalysisKey ScopInfoAnalysis::Key;
2741
2742ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
2743 FunctionAnalysisManager &FAM) {
2744 auto &SD = FAM.getResult<ScopAnalysis>(IR&: F);
2745 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(IR&: F);
2746 auto &LI = FAM.getResult<LoopAnalysis>(IR&: F);
2747 auto &AA = FAM.getResult<AAManager>(IR&: F);
2748 auto &DT = FAM.getResult<DominatorTreeAnalysis>(IR&: F);
2749 auto &AC = FAM.getResult<AssumptionAnalysis>(IR&: F);
2750 auto &DL = F.getParent()->getDataLayout();
2751 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(IR&: F);
2752 return {DL, SD, SE, LI, AA, DT, AC, ORE};
2753}
2754
2755PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2756 FunctionAnalysisManager &FAM) {
2757 auto &SI = FAM.getResult<ScopInfoAnalysis>(IR&: F);
2758 // Since the legacy PM processes Scops in bottom up, we print them in reverse
2759 // order here to keep the output persistent
2760 for (auto &It : reverse(C&: SI)) {
2761 if (It.second)
2762 It.second->print(OS&: Stream, PrintInstructions: PollyPrintInstructions);
2763 else
2764 Stream << "Invalid Scop!\n";
2765 }
2766 return PreservedAnalyses::all();
2767}
2768
/// Declare the analyses ScopBuilder consumes; mirrors
/// ScopInfoRegionPass::getAnalysisUsage but at function granularity.
void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  // Transitive requirements: passes that depend on this one may in turn query
  // SCEV and Scop detection through it.
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}
2780
2781bool ScopInfoWrapperPass::runOnFunction(Function &F) {
2782 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2783 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2784 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2785 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2786 auto const &DL = F.getParent()->getDataLayout();
2787 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2788 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2789 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2790
2791 Result.reset(p: new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
2792 return false;
2793}
2794
2795void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2796 for (auto &It : *Result) {
2797 if (It.second)
2798 It.second->print(OS, PrintInstructions: PollyPrintInstructions);
2799 else
2800 OS << "Invalid Scop!\n";
2801 }
2802}
2803
// Unique pass identifier; the legacy pass manager keys off its address.
char ScopInfoWrapperPass::ID = 0;

// Factory used by Polly's pass registration machinery.
Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}
2809
// Legacy pass-manager registration for ScopInfoWrapperPass, together with the
// analyses it depends on (must match getAnalysisUsage above).
INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)
2825
2826//===----------------------------------------------------------------------===//
2827
2828namespace {
2829/// Print result from ScopInfoWrapperPass.
2830class ScopInfoPrinterLegacyFunctionPass final : public FunctionPass {
2831public:
2832 static char ID;
2833
2834 ScopInfoPrinterLegacyFunctionPass()
2835 : ScopInfoPrinterLegacyFunctionPass(outs()) {}
2836 explicit ScopInfoPrinterLegacyFunctionPass(llvm::raw_ostream &OS)
2837 : FunctionPass(ID), OS(OS) {}
2838
2839 bool runOnFunction(Function &F) override {
2840 ScopInfoWrapperPass &P = getAnalysis<ScopInfoWrapperPass>();
2841
2842 OS << "Printing analysis '" << P.getPassName() << "' for function '"
2843 << F.getName() << "':\n";
2844 P.print(OS);
2845
2846 return false;
2847 }
2848
2849 void getAnalysisUsage(AnalysisUsage &AU) const override {
2850 FunctionPass::getAnalysisUsage(AU);
2851 AU.addRequired<ScopInfoWrapperPass>();
2852 AU.setPreservesAll();
2853 }
2854
2855private:
2856 llvm::raw_ostream &OS;
2857};
2858
2859char ScopInfoPrinterLegacyFunctionPass::ID = 0;
2860} // namespace
2861
// Factory for the function-level Scop printer pass; @p OS receives the output.
Pass *polly::createScopInfoPrinterLegacyFunctionPass(raw_ostream &OS) {
  return new ScopInfoPrinterLegacyFunctionPass(OS);
}
2865
// Legacy pass-manager registration for the function-level Scop printer.
INITIALIZE_PASS_BEGIN(
    ScopInfoPrinterLegacyFunctionPass, "polly-print-function-scops",
    "Polly - Print polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(ScopInfoWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoPrinterLegacyFunctionPass, "polly-print-function-scops",
    "Polly - Print polyhedral description of all Scops of a function", false,
    false)
2875

// source code of polly/lib/Analysis/ScopInfo.cpp