Polly 20.0.0git
ScopBuilder.h
1//===- polly/ScopBuilder.h --------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Create a polyhedral description for a static control flow region.
10//
11// The pass creates a polyhedral description of the Scops detected by the SCoP
12// detection derived from their LLVM-IR code.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef POLLY_SCOPBUILDER_H
17#define POLLY_SCOPBUILDER_H
18
19#include "polly/ScopInfo.h"
20#include "polly/Support/ScopHelper.h"
21#include "llvm/ADT/ArrayRef.h"
22#include "llvm/ADT/SetVector.h"
23
24namespace polly {
25using llvm::SmallSetVector;
26
27class ScopDetection;
28
29/// Command line switch whether to model read-only accesses.
30extern bool ModelReadOnlyScalars;
31
32/// Build the Polly IR (Scop and ScopStmt) on a Region.
33class ScopBuilder final {
34
35 /// The AAResults to build AliasSetTracker.
36 AAResults &AA;
37
38 /// Target data for element size computing.
39 const DataLayout &DL;
40
41 /// DominatorTree to reason about guaranteed execution.
42 DominatorTree &DT;
43
44 /// LoopInfo for information about loops.
45 LoopInfo &LI;
46
47 /// Valid Regions for Scop
48 ScopDetection &SD;
49
50 /// The ScalarEvolution to help building Scop.
51 ScalarEvolution &SE;
52
53 /// An optimization diagnostic interface to add optimization remarks.
54 OptimizationRemarkEmitter &ORE;
55
56 /// Set of instructions that might read any memory location.
57 SmallVector<std::pair<ScopStmt *, Instruction *>, 16> GlobalReads;
58
59 /// Set of all accessed array base pointers.
60 SmallSetVector<Value *, 16> ArrayBasePointers;
61
62 // The Scop
63 std::unique_ptr<Scop> scop;
64
65 /// Collection to hold taken assumptions.
66 ///
67 /// There are two reasons why we want to record assumptions first before we
68 /// add them to the assumed/invalid context:
69 /// 1) If the SCoP is not profitable or otherwise invalid without the
70 /// assumed/invalid context we do not have to compute it.
71 /// 2) Information about the context is gathered rather late in the SCoP
72 /// construction (basically after we know all parameters), thus the user
73 /// might see overly complicated assumptions to be taken while they will
74 /// only be simplified later on.
75 RecordedAssumptionsTy RecordedAssumptions;
76
77 // Build the SCoP for Region @p R.
78 void buildScop(Region &R, AssumptionCache &AC);
79
80 /// Adjust the dimensions of @p Dom that was constructed for @p OldL
81 /// to be compatible to domains constructed for loop @p NewL.
82 ///
83 /// This function assumes @p NewL and @p OldL are equal or there is a CFG
84 /// edge from @p OldL to @p NewL.
85 isl::set adjustDomainDimensions(isl::set Dom, Loop *OldL, Loop *NewL);
86
87 /// Compute the domain for each basic block in @p R.
88 ///
89 /// @param R The region we currently traverse.
90 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
91 /// region.
92 ///
93 /// @returns True if there was no problem and false otherwise.
94 bool buildDomains(Region *R,
95 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
96
97 /// Compute the branching constraints for each basic block in @p R.
98 ///
99 /// @param R The region we currently build branching conditions
100 /// for.
101 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
102 /// region.
103 ///
104 /// @returns True if there was no problem and false otherwise.
105 bool buildDomainsWithBranchConstraints(
106 Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
107
108 /// Build the condition sets for the terminator @p TI in the @p Domain.
109 ///
110 /// This will fill @p ConditionSets with the conditions under which control
111 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
112 /// have as many elements as @p TI has successors.
113 bool buildConditionSets(BasicBlock *BB, Instruction *TI, Loop *L,
114 __isl_keep isl_set *Domain,
115 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
116 SmallVectorImpl<__isl_give isl_set *> &ConditionSets);
117
118 /// Build the condition sets for the branch condition @p Condition in
119 /// the @p Domain.
120 ///
121 /// This will fill @p ConditionSets with the conditions under which control
122 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
123 /// have as many elements as @p TI has successors. If @p TI is nullptr the
124 /// context under which @p Condition is true/false will be returned as the
125 /// new elements of @p ConditionSets.
126 bool buildConditionSets(BasicBlock *BB, Value *Condition, Instruction *TI,
127 Loop *L, __isl_keep isl_set *Domain,
128 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
129 SmallVectorImpl<__isl_give isl_set *> &ConditionSets);
130
131 /// Build the condition sets for the switch @p SI in the @p Domain.
132 ///
133 /// This will fill @p ConditionSets with the conditions under which control
134 /// will be moved from @p SI to its successors. Hence, @p ConditionSets will
135 /// have as many elements as @p SI has successors.
136 bool buildConditionSets(BasicBlock *BB, SwitchInst *SI, Loop *L,
137 __isl_keep isl_set *Domain,
138 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
139 SmallVectorImpl<__isl_give isl_set *> &ConditionSets);
140
141 /// Build condition sets for unsigned ICmpInst(s).
142 /// Special handling is required for unsigned operands to ensure that if
143 /// MSB (aka the Sign bit) is set for an operand in an unsigned ICmpInst
144 /// it should wrap around.
145 ///
146 /// @param IsStrictUpperBound holds information on the predicate relation
147 /// between TestVal and UpperBound, i.e,
148 /// TestVal < UpperBound OR TestVal <= UpperBound
149 __isl_give isl_set *buildUnsignedConditionSets(
150 BasicBlock *BB, Value *Condition, __isl_keep isl_set *Domain,
151 const SCEV *SCEV_TestVal, const SCEV *SCEV_UpperBound,
152 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
153 bool IsStrictUpperBound);
154
155 /// Propagate the domain constraints through the region @p R.
156 ///
157 /// @param R The region we currently build branching
158 /// conditions for.
159 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
160 /// region.
161 ///
162 /// @returns True if there was no problem and false otherwise.
163 bool propagateDomainConstraints(
164 Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
165
166 /// Propagate domains that are known due to graph properties.
167 ///
168 /// As a CFG is mostly structured we use the graph properties to propagate
169 /// domains without the need to compute all path conditions. In particular,
170 /// if a block A dominates a block B and B post-dominates A we know that the
171 /// domain of B is a superset of the domain of A. As we do not have
172 /// post-dominator information available here we use the less precise region
173 /// information. Given a region R, we know that the exit is always executed
174 /// if the entry was executed, thus the domain of the exit is a superset of
175 /// the domain of the entry. In case the exit can only be reached from
176 /// within the region the domains are in fact equal. This function will use
177 /// this property to avoid the generation of condition constraints that
178 /// determine when a branch is taken. If @p BB is a region entry block we
179 /// will propagate its domain to the region exit block. Additionally, we put
180 /// the region exit block in the @p FinishedExitBlocks set so we can later
181 /// skip edges from within the region to that block.
182 ///
183 /// @param BB The block for which the domain is currently
184 /// propagated.
185 /// @param BBLoop The innermost affine loop surrounding @p BB.
186 /// @param FinishedExitBlocks Set of region exits the domain was set for.
187 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
188 /// region.
189 void propagateDomainConstraintsToRegionExit(
190 BasicBlock *BB, Loop *BBLoop,
191 SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks,
192 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
193
194 /// Propagate invalid domains of statements through @p R.
195 ///
196 /// This method will propagate invalid statement domains through @p R and at
197 /// the same time add error block domains to them. Additionally, the domains
198 /// of error statements and those only reachable via error statements will
199 /// be replaced by an empty set. Later those will be removed completely.
200 ///
201 /// @param R The currently traversed region.
202 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
203 /// region.
204 //
205 /// @returns True if there was no problem and false otherwise.
206 bool propagateInvalidStmtDomains(
207 Region *R, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
208
209 /// Compute the union of predecessor domains for @p BB.
210 ///
211 /// To compute the union of all domains of predecessors of @p BB this
212 /// function applies similar reasoning on the CFG structure as described for
213 /// @see propagateDomainConstraintsToRegionExit
214 ///
215 /// @param BB The block for which the predecessor domains are collected.
216 /// @param Domain The domain under which BB is executed.
217 ///
218 /// @returns The domain under which @p BB is executed.
219 isl::set getPredecessorDomainConstraints(BasicBlock *BB, isl::set Domain);
220
221 /// Add loop carried constraints to the header block of the loop @p L.
222 ///
223 /// @param L The loop to process.
224 /// @param InvalidDomainMap BB to InvalidDomain map for the BB of current
225 /// region.
226 ///
227 /// @returns True if there was no problem and false otherwise.
228 bool addLoopBoundsToHeaderDomain(
229 Loop *L, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
230
231 /// Compute the isl representation for the SCEV @p E in this BB.
232 ///
233 /// @param BB The BB for which isl representation is to be
234 /// computed.
235 /// @param InvalidDomainMap A map of BB to their invalid domains.
236 /// @param E The SCEV that should be translated.
237 /// @param NonNegative Flag to indicate the @p E has to be
238 /// non-negative.
239 ///
240 /// Note that this function will also adjust the invalid context
241 /// accordingly.
242 __isl_give isl_pw_aff *
243 getPwAff(BasicBlock *BB, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
244 const SCEV *E, bool NonNegative = false);
245
246 /// Create equivalence classes for required invariant accesses.
247 ///
248 /// These classes will consolidate multiple required invariant loads from the
249 /// same address in order to keep the number of dimensions in the SCoP
250 /// description small. For each such equivalence class only one
251 /// representing element, hence one required invariant load, will be chosen
252 /// and modeled as parameter. The method
253 /// Scop::getRepresentingInvariantLoadSCEV() will replace each element from an
254 /// equivalence class with the representing element that is modeled. As a
255 /// consequence Scop::getIdForParam() will only return an id for the
256 /// representing element of each equivalence class, thus for each required
257 /// invariant location.
258 void buildInvariantEquivalenceClasses();
259
260 /// Try to build a multi-dimensional fixed sized MemoryAccess from the
261 /// Load/Store instruction.
262 ///
263 /// @param Inst The Load/Store instruction that accesses the memory
264 /// @param Stmt The parent statement of the instruction
265 ///
266 /// @returns True if the access could be built, False otherwise.
267 bool buildAccessMultiDimFixed(MemAccInst Inst, ScopStmt *Stmt);
268
269 /// Try to build a multi-dimensional parametric sized MemoryAccess
270 /// from the Load/Store instruction.
271 ///
272 /// @param Inst The Load/Store instruction that accesses the memory
273 /// @param Stmt The parent statement of the instruction
274 ///
275 /// @returns True if the access could be built, False otherwise.
276 bool buildAccessMultiDimParam(MemAccInst Inst, ScopStmt *Stmt);
277
278 /// Try to build a MemoryAccess for a memory intrinsic.
279 ///
280 /// @param Inst The instruction that accesses the memory
281 /// @param Stmt The parent statement of the instruction
282 ///
283 /// @returns True if the access could be built, False otherwise.
284 bool buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt);
285
286 /// Try to build a MemoryAccess for a call instruction.
287 ///
288 /// @param Inst The call instruction that accesses the memory
289 /// @param Stmt The parent statement of the instruction
290 ///
291 /// @returns True if the access could be built, False otherwise.
292 bool buildAccessCallInst(MemAccInst Inst, ScopStmt *Stmt);
293
294 /// Build a single-dimensional parametric sized MemoryAccess
295 /// from the Load/Store instruction.
296 ///
297 /// @param Inst The Load/Store instruction that accesses the memory
298 /// @param Stmt The parent statement of the instruction
299 ///
300 /// @returns True if the access could be built, False otherwise.
301 bool buildAccessSingleDim(MemAccInst Inst, ScopStmt *Stmt);
302
303 /// Finalize all access relations.
304 ///
305 /// When building up access relations, temporary access relations that
306 /// correctly represent each individual access are constructed. However, these
307 /// access relations can be inconsistent or non-optimal when looking at the
308 /// set of accesses as a whole. This function finalizes the memory accesses
309 /// and constructs a globally consistent state.
310 void finalizeAccesses();
311
312 /// Update access dimensionalities.
313 ///
314 /// When detecting memory accesses, different accesses to the same array may
315 /// have been built with different dimensionality, as outer zero-value dimensions
316 /// may not have been recognized as separate dimensions. This function goes
317 /// again over all memory accesses and updates their dimensionality to match
318 /// the dimensionality of the underlying ScopArrayInfo object.
319 void updateAccessDimensionality();
320
321 /// Fold size constants to the right.
322 ///
323 /// In case all memory accesses in a given dimension are multiplied with a
324 /// common constant, we can remove this constant from the individual access
325 /// functions and move it to the size of the memory access. We do this as this
326 /// increases the size of the innermost dimension, consequently widens the
327 /// valid range the array subscript in this dimension can evaluate to, and
328 /// as a result increases the likelihood that our delinearization is
329 /// correct.
330 ///
331 /// Example:
332 ///
333 /// A[][n]
334 /// S[i,j] -> A[2i][2j+1]
335 /// S[i,j] -> A[2i][2j]
336 ///
337 /// =>
338 ///
339 /// A[][2n]
340 /// S[i,j] -> A[i][2j+1]
341 /// S[i,j] -> A[i][2j]
342 ///
343 /// Constants in outer dimensions can arise when the elements of a parametric
344 /// multi-dimensional array are not elementary data types, but e.g.,
345 /// structures.
346 void foldSizeConstantsToRight();
347
348 /// Fold memory accesses to handle parametric offset.
349 ///
350 /// As a post-processing step, we 'fold' memory accesses to parametric
351 /// offsets in the access functions. @see MemoryAccess::foldAccess for
352 /// details.
353 void foldAccessRelations();
354
355 /// Assume that all memory accesses are within bounds.
356 ///
357 /// After we have built a model of all memory accesses, we need to assume
358 /// that the model we built matches reality -- aka. all modeled memory
359 /// accesses always remain within bounds. We do this as last step, after
360 /// all memory accesses have been modeled and canonicalized.
361 void assumeNoOutOfBounds();
362
363 /// Build the alias checks for this SCoP.
364 bool buildAliasChecks();
365
366 /// A vector of memory accesses that belong to an alias group.
367 using AliasGroupTy = SmallVector<MemoryAccess *, 4>;
368
369 /// A vector of alias groups.
370 using AliasGroupVectorTy = SmallVector<AliasGroupTy, 4>;
371
372 /// Build a given alias group and its access data.
373 ///
374 /// @param AliasGroup The alias group to build.
375 /// @param HasWriteAccess A set of arrays through which memory is not only
376 /// read, but also written.
377 //
378 /// @returns True if __no__ error occurred, false otherwise.
379 bool buildAliasGroup(AliasGroupTy &AliasGroup,
380 DenseSet<const ScopArrayInfo *> HasWriteAccess);
381
382 /// Build all alias groups for this SCoP.
383 ///
384 /// @returns True if __no__ error occurred, false otherwise.
385 bool buildAliasGroups();
386
387 /// Build alias groups for all memory accesses in the Scop.
388 ///
389 /// Using the alias analysis and an alias set tracker we build alias sets
390 /// for all memory accesses inside the Scop. For each alias set we then map
391 /// the aliasing pointers back to the memory accesses we know, thus obtain
392 /// groups of memory accesses which might alias. We also collect the set of
393 /// arrays through which memory is written.
394 ///
395 /// @returns A pair consisting of a vector of alias groups and a set of arrays
396 /// through which memory is written.
397 std::tuple<AliasGroupVectorTy, DenseSet<const ScopArrayInfo *>>
398 buildAliasGroupsForAccesses();
399
400 /// Split alias groups by iteration domains.
401 ///
402 /// We split each group based on the domains of the minimal/maximal accesses.
403 /// That means two minimal/maximal accesses are only in a group if their
404 /// access domains intersect. Otherwise, they are in different groups.
405 ///
406 /// @param AliasGroups The alias groups to split
407 void splitAliasGroupsByDomain(AliasGroupVectorTy &AliasGroups);
408
409 /// Build an instance of MemoryAccess from the Load/Store instruction.
410 ///
411 /// @param Inst The Load/Store instruction that accesses the memory
412 /// @param Stmt The parent statement of the instruction
413 void buildMemoryAccess(MemAccInst Inst, ScopStmt *Stmt);
414
415 /// Analyze and extract the cross-BB scalar dependences (or, dataflow
416 /// dependencies) of an instruction.
417 ///
418 /// @param UserStmt The statement @p Inst resides in.
419 /// @param Inst The instruction to be analyzed.
420 void buildScalarDependences(ScopStmt *UserStmt, Instruction *Inst);
421
422 /// Build the escaping dependences for @p Inst.
423 ///
424 /// Search for uses of the llvm::Value defined by @p Inst that are not
425 /// within the SCoP. If there is such a use, add a SCALAR WRITE such that
426 /// it is available after the SCoP as escaping value.
427 ///
428 /// @param Inst The instruction to be analyzed.
429 void buildEscapingDependences(Instruction *Inst);
430
431 /// Create MemoryAccesses for the given PHI node in the given region.
432 ///
433 /// @param PHIStmt The statement @p PHI resides in.
434 /// @param PHI The PHI node to be handled
435 /// @param NonAffineSubRegion The non affine sub-region @p PHI is in.
436 /// @param IsExitBlock Flag to indicate that @p PHI is in the exit BB.
437 void buildPHIAccesses(ScopStmt *PHIStmt, PHINode *PHI,
438 Region *NonAffineSubRegion, bool IsExitBlock = false);
439
440 /// Build the access functions for the subregion @p SR.
441 void buildAccessFunctions();
442
443 /// Should an instruction be modeled in a ScopStmt.
444 ///
445 /// @param Inst The instruction to check.
446 /// @param L The loop in which context the instruction is looked at.
447 ///
448 /// @returns True if the instruction should be modeled.
449 bool shouldModelInst(Instruction *Inst, Loop *L);
450
451 /// Create one or more ScopStmts for @p BB.
452 ///
453 /// Consecutive instructions are associated to the same statement until a
454 /// separator is found.
455 void buildSequentialBlockStmts(BasicBlock *BB, bool SplitOnStore = false);
456
457 /// Create one or more ScopStmts for @p BB using equivalence classes.
458 ///
459 /// Instructions of a basic block that belong to the same equivalence class
460 /// are added to the same statement.
461 void buildEqivClassBlockStmts(BasicBlock *BB);
462
463 /// Create ScopStmt for all BBs and non-affine subregions of @p SR.
464 ///
465 /// @param SR A subregion of @p R.
466 ///
467 /// Some of the statements might be optimized away later when they do not
468 /// access any memory and thus have no effect.
469 void buildStmts(Region &SR);
470
471 /// Build the access functions for the statement @p Stmt in or represented by
472 /// @p BB.
473 ///
474 /// @param Stmt Statement to add MemoryAccesses to.
475 /// @param BB A basic block in @p R.
476 /// @param NonAffineSubRegion The non affine sub-region @p BB is in.
477 void buildAccessFunctions(ScopStmt *Stmt, BasicBlock &BB,
478 Region *NonAffineSubRegion = nullptr);
479
480 /// Create a new MemoryAccess object and add it to #AccFuncMap.
481 ///
482 /// @param Stmt The statement where the access takes place.
483 /// @param Inst The instruction doing the access. It is not necessarily
484 /// inside @p BB.
485 /// @param AccType The kind of access.
486 /// @param BaseAddress The accessed array's base address.
487 /// @param ElemType The type of the accessed array elements.
488 /// @param Affine Whether all subscripts are affine expressions.
489 /// @param AccessValue Value read or written.
490 /// @param Subscripts Access subscripts per dimension.
491 /// @param Sizes The array dimension's sizes.
492 /// @param Kind The kind of memory accessed.
493 ///
494 /// @return The created MemoryAccess, or nullptr if the access is not within
495 /// the SCoP.
496 MemoryAccess *addMemoryAccess(ScopStmt *Stmt, Instruction *Inst,
497 MemoryAccess::AccessType AccType,
498 Value *BaseAddress, Type *ElemType, bool Affine,
499 Value *AccessValue,
500 ArrayRef<const SCEV *> Subscripts,
501 ArrayRef<const SCEV *> Sizes, MemoryKind Kind);
502
503 /// Create a MemoryAccess that represents either a LoadInst or
504 /// StoreInst.
505 ///
506 /// @param Stmt The statement to add the MemoryAccess to.
507 /// @param MemAccInst The LoadInst or StoreInst.
508 /// @param AccType The kind of access.
509 /// @param BaseAddress The accessed array's base address.
510 /// @param ElemType The type of the accessed array elements.
511 /// @param IsAffine Whether all subscripts are affine expressions.
512 /// @param Subscripts Access subscripts per dimension.
513 /// @param Sizes The array dimension's sizes.
514 /// @param AccessValue Value read or written.
515 ///
516 /// @see MemoryKind
517 void addArrayAccess(ScopStmt *Stmt, MemAccInst MemAccInst,
518 MemoryAccess::AccessType AccType, Value *BaseAddress,
519 Type *ElemType, bool IsAffine,
520 ArrayRef<const SCEV *> Subscripts,
521 ArrayRef<const SCEV *> Sizes, Value *AccessValue);
522
523 /// Create a MemoryAccess for writing an llvm::Instruction.
524 ///
525 /// The access will be created at the position of @p Inst.
526 ///
527 /// @param Inst The instruction to be written.
528 ///
529 /// @see ensureValueRead()
530 /// @see MemoryKind
531 void ensureValueWrite(Instruction *Inst);
532
533 /// Ensure an llvm::Value is available in the BB's statement, creating a
534 /// MemoryAccess for reloading it if necessary.
535 ///
536 /// @param V The value expected to be loaded.
537 /// @param UserStmt Where to reload the value.
538 ///
539 /// @see ensureValueStore()
540 /// @see MemoryKind
541 void ensureValueRead(Value *V, ScopStmt *UserStmt);
542
543 /// Create a write MemoryAccess for the incoming block of a phi node.
544 ///
545 /// Each of the incoming blocks writes its incoming value to be picked in the
546 /// phi's block.
547 ///
548 /// @param PHI PHINode under consideration.
549 /// @param IncomingStmt The statement to add the MemoryAccess to.
550 /// @param IncomingBlock Some predecessor block.
551 /// @param IncomingValue @p PHI's value when coming from @p IncomingBlock.
552 /// @param IsExitBlock When true, uses the .s2a alloca instead of the
553 /// .phiops one. Required for values escaping through a
554 /// PHINode in the SCoP region's exit block.
555 /// @see addPHIReadAccess()
556 /// @see MemoryKind
557 void ensurePHIWrite(PHINode *PHI, ScopStmt *IncomintStmt,
558 BasicBlock *IncomingBlock, Value *IncomingValue,
559 bool IsExitBlock);
560
561 /// Add user provided parameter constraints to context (command line).
562 void addUserContext();
563
564 /// Add user provided parameter constraints to context (source code).
565 void addUserAssumptions(AssumptionCache &AC,
566 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap);
567
568 /// Add all recorded assumptions to the assumed context.
569 void addRecordedAssumptions();
570
571 /// Create a MemoryAccess for reading the value of a phi.
572 ///
573 /// The modeling assumes that all incoming blocks write their incoming value
574 /// to the same location. Thus, this access will read the incoming block's
575 /// value as instructed by this @p PHI.
576 ///
577 /// @param PHIStmt Statement @p PHI resides in.
578 /// @param PHI PHINode under consideration; the READ access will be added
579 /// here.
580 ///
581 /// @see ensurePHIWrite()
582 /// @see MemoryKind
583 void addPHIReadAccess(ScopStmt *PHIStmt, PHINode *PHI);
584
585 /// Wrapper function to calculate minimal/maximal accesses to each array.
586 bool calculateMinMaxAccess(AliasGroupTy AliasGroup,
587 Scop::MinMaxVectorTy &MinMaxAccesses);
588 /// Build the domain of @p Stmt.
589 void buildDomain(ScopStmt &Stmt);
590
591 /// Fill NestLoops with loops surrounding @p Stmt.
592 void collectSurroundingLoops(ScopStmt &Stmt);
593
594 /// Check for reductions in @p Stmt.
595 ///
596 /// Iterate over all store memory accesses and check for valid binary
597 /// reduction like chains. For all candidates we check if they have the same
598 /// base address and there are no other accesses which overlap with them. The
599 /// base address check rules out impossible reduction candidates early. The
600 /// overlap check, together with the "only one user" check in
601 /// collectCandidateReductionLoads, guarantees that none of the intermediate
602 /// results will escape during execution of the loop nest. We basically check
603 /// here that no other memory access can access the same memory as the
604 /// potential reduction.
605 void checkForReductions(ScopStmt &Stmt);
606
607 /// Verify that all required invariant loads have been hoisted.
608 ///
609 /// Invariant load hoisting is not guaranteed to hoist all loads that were
610 /// assumed to be scop invariant during scop detection. This function checks
611 /// for cases where the hoisting failed, but where it would have been
612 /// necessary for our scop modeling to be correct. In case of insufficient
613 /// hoisting the scop is marked as invalid.
614 ///
615 /// In the example below Bound[1] is required to be invariant:
616 ///
617 /// for (int i = 1; i < Bound[0]; i++)
618 /// for (int j = 1; j < Bound[1]; j++)
619 /// ...
620 void verifyInvariantLoads();
621
622 /// Hoist invariant memory loads and check for required ones.
623 ///
624 /// We first identify "common" invariant loads, thus loads that are invariant
625 /// and can be hoisted. Then we check if all required invariant loads have
626 /// been identified as (common) invariant. A load is a required invariant load
627 /// if it was assumed to be invariant during SCoP detection, e.g., to assume
628 /// loop bounds to be affine or runtime alias checks to be placeable. In case
629 /// a required invariant load was not identified as (common) invariant we will
630 /// drop this SCoP. An example for both "common" as well as required invariant
631 /// loads is given below:
632 ///
633 /// for (int i = 1; i < *LB[0]; i++)
634 /// for (int j = 1; j < *LB[1]; j++)
635 /// A[i][j] += A[0][0] + (*V);
636 ///
637 /// Common inv. loads: V, A[0][0], LB[0], LB[1]
638 /// Required inv. loads: LB[0], LB[1], (V, if it may alias with A or LB)
639 void hoistInvariantLoads();
640
641 /// Add invariant loads listed in @p InvMAs with the domain of @p Stmt.
642 void addInvariantLoads(ScopStmt &Stmt, InvariantAccessesTy &InvMAs);
643
644 /// Check if @p MA can always be hoisted without execution context.
645 bool canAlwaysBeHoisted(MemoryAccess *MA, bool StmtInvalidCtxIsEmpty,
646 bool MAInvalidCtxIsEmpty,
647 bool NonHoistableCtxIsEmpty);
648
649 /// Return true if and only if @p LI is a required invariant load.
650 bool isRequiredInvariantLoad(LoadInst *LI) const {
651 return scop->getRequiredInvariantLoads().count(LI);
652 }
653
654 /// Check if the base ptr of @p MA is in the SCoP but not hoistable.
655 bool hasNonHoistableBasePtrInScop(MemoryAccess *MA, isl::union_map Writes);
656
657 /// Return the context under which the access cannot be hoisted.
658 ///
659 /// @param Access The access to check.
660 /// @param Writes The set of all memory writes in the scop.
661 ///
662 /// @return Return the context under which the access cannot be hoisted or a
663 /// nullptr if it cannot be hoisted at all.
664 isl::set getNonHoistableCtx(MemoryAccess *Access, isl::union_map Writes);
665
666 /// Build the access relation of all memory accesses of @p Stmt.
667 void buildAccessRelations(ScopStmt &Stmt);
668
669 /// Canonicalize arrays with base pointers from the same equivalence class.
670 ///
671 /// Some context: in our normal model we assume that each base pointer is
672 /// related to a single specific memory region, where memory regions
673 /// associated with different base pointers are disjoint. Consequently we do
674 /// not need to compute additional data dependences that model possible
675 /// overlaps of these memory regions. To verify our assumption we compute
676 /// alias checks that verify that modeled arrays indeed do not overlap. In
677 /// case an overlap is detected the runtime check fails and we fall back to
678 /// the original code.
679 ///
680 /// In case of arrays where the base pointers are known to be identical,
681 /// because they are dynamically loaded by accesses that are in the same
682 /// invariant load equivalence class, such run-time alias check would always
683 /// be false.
684 ///
685 /// This function makes sure that we do not generate consistently failing
686 /// run-time checks for code that contains distinct arrays with known
687 /// equivalent base pointers. It identifies for each invariant load
688 /// equivalence class a single canonical array and canonicalizes all memory
689 /// accesses that reference arrays that have base pointers that are known to
690 /// be equal to the base pointer of such a canonical array to this canonical
691 /// array.
692 ///
693 /// We currently do not canonicalize arrays for which certain memory accesses
694 /// have been hoisted as loop invariant.
695 void canonicalizeDynamicBasePtrs();
696
697 /// Construct the schedule of this SCoP.
698 void buildSchedule();
699
700 /// A loop stack element to keep track of per-loop information during
701 /// schedule construction.
702 struct LoopStackElementTy {
703 // The loop for which we keep information.
704 Loop *L;
705
706 // The (possibly incomplete) schedule for this loop.
707 isl::schedule Schedule;
708
709 // The number of basic blocks in the current loop, for which a schedule has
710 // already been constructed.
711 unsigned NumBlocksProcessed;
712
713 LoopStackElementTy(Loop *L, isl::schedule S, unsigned NumBlocksProcessed)
714 : L(L), Schedule(S), NumBlocksProcessed(NumBlocksProcessed) {}
715 };
716
717 /// The loop stack used for schedule construction.
718 ///
719 /// The loop stack keeps track of schedule information for a set of nested
720 /// loops as well as an (optional) 'nullptr' loop that models the outermost
721 /// schedule dimension. The loops in a loop stack always have a parent-child
722 /// relation where the loop at position n is the parent of the loop at
723 /// position n + 1.
724 using LoopStackTy = SmallVector<LoopStackElementTy, 4>;
725
726 /// Construct schedule information for a given Region and add the
727 /// derived information to @p LoopStack.
728 ///
729 /// Given a Region we derive schedule information for all RegionNodes
730 /// contained in this region ensuring that the assigned execution times
731 /// correctly model the existing control flow relations.
732 ///
733 /// @param R The region which to process.
734 /// @param LoopStack A stack of loops that are currently under
735 /// construction.
736 void buildSchedule(Region *R, LoopStackTy &LoopStack);
737
738 /// Build Schedule for the region node @p RN and add the derived
739 /// information to @p LoopStack.
740 ///
741 /// In case @p RN is a BasicBlock or a non-affine Region, we construct the
742 /// schedule for this @p RN and also finalize loop schedules in case the
743 /// current @p RN completes the loop.
744 ///
745 /// In case @p RN is a Region that is not non-affine, we delegate the construction to
746 /// buildSchedule(Region *R, ...).
747 ///
748 /// @param RN The RegionNode region traversed.
749 /// @param LoopStack A stack of loops that are currently under
750 /// construction.
751 void buildSchedule(RegionNode *RN, LoopStackTy &LoopStack);
752
753public:
754 explicit ScopBuilder(Region *R, AssumptionCache &AC, AAResults &AA,
755 const DataLayout &DL, DominatorTree &DT, LoopInfo &LI,
756 ScopDetection &SD, ScalarEvolution &SE,
757 OptimizationRemarkEmitter &ORE);
758 ScopBuilder(const ScopBuilder &) = delete;
759 ScopBuilder &operator=(const ScopBuilder &) = delete;
760 ~ScopBuilder() = default;
761
762 /// Try to build the Polly IR of static control part on the current
763 /// SESE-Region.
764 ///
765 /// @return Give up the ownership of the scop object or static control part
766 /// for the region
767 std::unique_ptr<Scop> getScop() { return std::move(scop); }
768};
769} // end namespace polly
770
771#endif // POLLY_SCOPBUILDER_H
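The members above are private; the public surface is just the constructor and getScop(). The sketch below is a minimal, hypothetical driver — the helper name buildScopFor and the way the analysis results are obtained are assumptions, not part of this header — showing the usual pattern: constructing the builder performs the SCoP modeling for the given SESE region, and getScop() then hands over ownership of the resulting Scop.

#include "polly/ScopBuilder.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include <memory>

// Hypothetical helper; the analysis references are assumed to be supplied by
// the surrounding pass (e.g. obtained from LLVM's analysis managers).
static std::unique_ptr<polly::Scop>
buildScopFor(llvm::Region *R, llvm::AssumptionCache &AC, llvm::AAResults &AA,
             const llvm::DataLayout &DL, llvm::DominatorTree &DT,
             llvm::LoopInfo &LI, polly::ScopDetection &SD,
             llvm::ScalarEvolution &SE,
             llvm::OptimizationRemarkEmitter &ORE) {
  // Construction drives the (private) buildScop() machinery for region R.
  polly::ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
  // Transfers ownership of the polyhedral description that was built.
  return SB.getScop();
}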
Utility proxy to wrap the common members of LoadInst and StoreInst.
Definition: ScopHelper.h:140
Represent memory accesses in statements.
Definition: ScopInfo.h:431
AccessType
The access type of a memory access.
Definition: ScopInfo.h:457
Build the Polly IR (Scop and ScopStmt) on a Region.
Definition: ScopBuilder.h:33
void buildDomain(ScopStmt &Stmt)
Build the domain of Stmt.
void propagateDomainConstraintsToRegionExit(BasicBlock *BB, Loop *BBLoop, SmallPtrSetImpl< BasicBlock * > &FinishedExitBlocks, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Propagate domains that are known due to graph properties.
bool isRequiredInvariantLoad(LoadInst *LI) const
Return true if and only if LI is a required invariant load.
Definition: ScopBuilder.h:650
bool propagateInvalidStmtDomains(Region *R, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Propagate invalid domains of statements through R.
void ensurePHIWrite(PHINode *PHI, ScopStmt *IncomintStmt, BasicBlock *IncomingBlock, Value *IncomingValue, bool IsExitBlock)
Create a write MemoryAccess for the incoming block of a phi node.
void addInvariantLoads(ScopStmt &Stmt, InvariantAccessesTy &InvMAs)
Add invariant loads listed in InvMAs with the domain of Stmt.
void canonicalizeDynamicBasePtrs()
Canonicalize arrays with base pointers from the same equivalence class.
bool calculateMinMaxAccess(AliasGroupTy AliasGroup, Scop::MinMaxVectorTy &MinMaxAccesses)
Wrapper function to calculate minimal/maximal accesses to each array.
void verifyInvariantLoads()
Verify that all required invariant loads have been hoisted.
struct LoopStackElementTy
A loop stack element to keep track of per-loop information during schedule construction.
Definition: ScopBuilder.h:704
isl::schedule Schedule
Definition: ScopBuilder.h:707
void addUserContext()
Add user provided parameter constraints to context (command line).
void ensureValueRead(Value *V, ScopStmt *UserStmt)
Ensure an llvm::Value is available in the BB's statement, creating a MemoryAccess for reloading it if...
void buildPHIAccesses(ScopStmt *PHIStmt, PHINode *PHI, Region *NonAffineSubRegion, bool IsExitBlock=false)
Create MemoryAccesses for the given PHI node in the given region.
void buildSchedule()
Construct the schedule of this SCoP.
SmallVector< std::pair< ScopStmt *, Instruction * >, 16 > GlobalReads
Set of instructions that might read any memory location.
Definition: ScopBuilder.h:57
ScalarEvolution & SE
The ScalarEvolution to help building Scop.
Definition: ScopBuilder.h:51
void foldAccessRelations()
Fold memory accesses to handle parametric offset.
std::tuple< AliasGroupVectorTy, DenseSet< const ScopArrayInfo * > > buildAliasGroupsForAccesses()
Build alias groups for all memory accesses in the Scop.
bool propagateDomainConstraints(Region *R, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Propagate the domain constraints through the region R.
bool buildConditionSets(BasicBlock *BB, Instruction *TI, Loop *L, __isl_keep isl_set *Domain, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap, SmallVectorImpl< __isl_give isl_set * > &ConditionSets)
Build the condition sets for the terminator TI in the Domain.
void addPHIReadAccess(ScopStmt *PHIStmt, PHINode *PHI)
Create a MemoryAccess for reading the value of a phi.
bool buildAccessCallInst(MemAccInst Inst, ScopStmt *Stmt)
Try to build a MemoryAccess for a call instruction.
void buildScalarDependences(ScopStmt *UserStmt, Instruction *Inst)
Analyze and extract the cross-BB scalar dependences (or, dataflow dependencies) of an instruction.
void foldSizeConstantsToRight()
Fold size constants to the right.
SmallSetVector< Value *, 16 > ArrayBasePointers
Set of all accessed array base pointers.
Definition: ScopBuilder.h:60
SmallVector< LoopStackElementTy, 4 > LoopStackTy
The loop stack used for schedule construction.
Definition: ScopBuilder.h:724
MemoryAccess * addMemoryAccess(ScopStmt *Stmt, Instruction *Inst, MemoryAccess::AccessType AccType, Value *BaseAddress, Type *ElemType, bool Affine, Value *AccessValue, ArrayRef< const SCEV * > Subscripts, ArrayRef< const SCEV * > Sizes, MemoryKind Kind)
Create a new MemoryAccess object and add it to #AccFuncMap.
void hoistInvariantLoads()
Hoist invariant memory loads and check for required ones.
SmallVector< AliasGroupTy, 4 > AliasGroupVectorTy
A vector of alias groups.
Definition: ScopBuilder.h:370
AAResults & AA
The AAResults to build AliasSetTracker.
Definition: ScopBuilder.h:36
bool buildAccessMultiDimFixed(MemAccInst Inst, ScopStmt *Stmt)
Try to build a multi-dimensional fixed sized MemoryAccess from the Load/Store instruction.
DominatorTree & DT
DominatorTree to reason about guaranteed execution.
Definition: ScopBuilder.h:42
__isl_give isl_set * buildUnsignedConditionSets(BasicBlock *BB, Value *Condition, __isl_keep isl_set *Domain, const SCEV *SCEV_TestVal, const SCEV *SCEV_UpperBound, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap, bool IsStrictUpperBound)
Build condition sets for unsigned ICmpInst(s).
const DataLayout & DL
Target data for element size computing.
Definition: ScopBuilder.h:39
bool buildAccessMemIntrinsic(MemAccInst Inst, ScopStmt *Stmt)
Try to build a MemoryAccess for a memory intrinsic.
void assumeNoOutOfBounds()
Assume that all memory accesses are within bounds.
isl::set getNonHoistableCtx(MemoryAccess *Access, isl::union_map Writes)
Return the context under which the access cannot be hoisted.
void buildInvariantEquivalenceClasses()
Create equivalence classes for required invariant accesses.
ScopBuilder & operator=(const ScopBuilder &)=delete
bool buildAliasGroups()
Build all alias groups for this SCoP.
void addArrayAccess(ScopStmt *Stmt, MemAccInst MemAccInst, MemoryAccess::AccessType AccType, Value *BaseAddress, Type *ElemType, bool IsAffine, ArrayRef< const SCEV * > Subscripts, ArrayRef< const SCEV * > Sizes, Value *AccessValue)
Create a MemoryAccess that represents either a LoadInst or StoreInst.
isl::set adjustDomainDimensions(isl::set Dom, Loop *OldL, Loop *NewL)
Adjust the dimensions of Dom that was constructed for OldL to be compatible to domains constructed fo...
bool buildAccessMultiDimParam(MemAccInst Inst, ScopStmt *Stmt)
Try to build a multi-dimensional parametric sized MemoryAccess.
unsigned NumBlocksProcessed
Definition: ScopBuilder.h:711
void buildEscapingDependences(Instruction *Inst)
Build the escaping dependences for Inst.
void buildEqivClassBlockStmts(BasicBlock *BB)
Create one or more ScopStmts for BB using equivalence classes.
void splitAliasGroupsByDomain(AliasGroupVectorTy &AliasGroups)
Split alias groups by iteration domains.
bool buildAliasGroup(AliasGroupTy &AliasGroup, DenseSet< const ScopArrayInfo * > HasWriteAccess)
Build a given alias group and its access data.
void addUserAssumptions(AssumptionCache &AC, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Add user provided parameter constraints to context (source code).
void checkForReductions(ScopStmt &Stmt)
Check for reductions in Stmt.
bool buildDomains(Region *R, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Compute the domain for each basic block in R.
void buildSequentialBlockStmts(BasicBlock *BB, bool SplitOnStore=false)
Create one or more ScopStmts for BB.
ScopDetection & SD
Valid Regions for Scop.
Definition: ScopBuilder.h:48
bool shouldModelInst(Instruction *Inst, Loop *L)
Should an instruction be modeled in a ScopStmt.
std::unique_ptr< Scop > scop
Definition: ScopBuilder.h:63
void buildMemoryAccess(MemAccInst Inst, ScopStmt *Stmt)
Build an instance of MemoryAccess from the Load/Store instruction.
bool buildAliasChecks()
Build the alias checks for this SCoP.
void updateAccessDimensionality()
Update access dimensionalities.
void addRecordedAssumptions()
Add all recorded assumptions to the assumed context.
void buildAccessRelations(ScopStmt &Stmt)
Build the access relation of all memory accesses of Stmt.
RecordedAssumptionsTy RecordedAssumptions
Collection to hold taken assumptions.
Definition: ScopBuilder.h:75
LoopStackElementTy(Loop *L, isl::schedule S, unsigned NumBlocksProcessed)
Definition: ScopBuilder.h:713
bool hasNonHoistableBasePtrInScop(MemoryAccess *MA, isl::union_map Writes)
Check if the base ptr of MA is in the SCoP but not hoistable.
bool addLoopBoundsToHeaderDomain(Loop *L, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Add loop carried constraints to the header block of the loop L.
bool buildDomainsWithBranchConstraints(Region *R, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap)
Compute the branching constraints for each basic block in R.
void buildAccessFunctions()
Build the access functions for the subregion SR.
bool canAlwaysBeHoisted(MemoryAccess *MA, bool StmtInvalidCtxIsEmpty, bool MAInvalidCtxIsEmpty, bool NonHoistableCtxIsEmpty)
Check if MA can always be hoisted without execution context.
bool buildAccessSingleDim(MemAccInst Inst, ScopStmt *Stmt)
Build a single-dimensional parametric sized MemoryAccess from the Load/Store instruction.
std::unique_ptr< Scop > getScop()
Try to build the Polly IR of static control part on the current SESE-Region.
Definition: ScopBuilder.h:767
void collectSurroundingLoops(ScopStmt &Stmt)
Fill NestLoops with loops surrounding Stmt.
void finalizeAccesses()
Finalize all access relations.
void buildScop(Region &R, AssumptionCache &AC)
LoopInfo & LI
LoopInfo for information about loops.
Definition: ScopBuilder.h:45
~ScopBuilder()=default
OptimizationRemarkEmitter & ORE
An optimization diagnostic interface to add optimization remarks.
Definition: ScopBuilder.h:54
void buildStmts(Region &SR)
Create ScopStmt for all BBs and non-affine subregions of SR.
ScopBuilder(const ScopBuilder &)=delete
void ensureValueWrite(Instruction *Inst)
Create a MemoryAccess for writing an llvm::Instruction.
SmallVector< MemoryAccess *, 4 > AliasGroupTy
A vector of memory accesses that belong to an alias group.
Definition: ScopBuilder.h:367
__isl_give isl_pw_aff * getPwAff(BasicBlock *BB, DenseMap< BasicBlock *, isl::set > &InvalidDomainMap, const SCEV *E, bool NonNegative=false)
Compute the isl representation for the SCEV E in this BB.
isl::set getPredecessorDomainConstraints(BasicBlock *BB, isl::set Domain)
Compute the union of predecessor domains for BB.
Pass to detect the maximal static control parts (Scops) of a function.
Statement of the Scop.
Definition: ScopInfo.h:1140
SmallVector< MinMaxAccessTy, 4 > MinMaxVectorTy
Vector of minimal/maximal accesses to different arrays.
Definition: ScopInfo.h:1636
#define __isl_give
Definition: ctx.h:19
#define __isl_keep
Definition: ctx.h:25
struct isl_set isl_set
Definition: map_type.h:26
MemoryKind
The different memory kinds used in Polly.
Definition: ScopInfo.h:100
@ PHI
MemoryKind::PHI: Models PHI nodes within the SCoP.
llvm::SmallVector< Assumption, 8 > RecordedAssumptionsTy
Definition: ScopHelper.h:80
bool ModelReadOnlyScalars
Command line switch whether to model read-only accesses.
Definition: ScopBuilder.cpp:71
SmallVector< InvariantAccess, 8 > InvariantAccessesTy
Ordered container type to hold invariant accesses.
Definition: ScopInfo.h:1103
static TupleKindPtr Domain("Domain")
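For orientation, here is a hand-written sketch — not generated by Polly and not part of this header — of the kind of polyhedral description ScopBuilder derives for a simple static control part. The statement and array names (Stmt, MemRef_A, MemRef_B) follow Polly's usual naming scheme but are illustrative only.

// A trivial SCoP: a perfectly nested, affine loop nest with one statement.
void example(int n, int m, float A[][1024], const float B[][1024]) {
  for (int i = 0; i < n; i++)
    for (int j = 0; j < m; j++)
      A[i][j] = B[j][i] + 1.0f; // ScopStmt "Stmt"
}

// Iteration domain:  [n, m] -> { Stmt[i, j] : 0 <= i < n and 0 <= j < m }
// Schedule:          { Stmt[i, j] -> [i, j] }
// Write access:      { Stmt[i, j] -> MemRef_A[i, j] }
// Read access:       { Stmt[i, j] -> MemRef_B[j, i] }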