// Polly 22.0.0git
// MatmulOptimizer.cpp
// (Documentation-browser page header: "Go to the documentation of this file.")
1//===- MatmulOptimizer.cpp -----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
11#include "polly/Options.h"
13#include "polly/ScopInfo.h"
14#include "polly/Simplify.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/DenseSet.h"
19#include "llvm/ADT/Sequence.h"
20#include "llvm/ADT/SetOperations.h"
21#include "llvm/ADT/SmallVector.h"
22#include "llvm/ADT/StringRef.h"
23#include "llvm/ADT/iterator_range.h"
24#include "llvm/Analysis/TargetTransformInfo.h"
25#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/Module.h"
28#include "llvm/Support/CommandLine.h"
29#include "llvm/Support/Debug.h"
30#include "llvm/Support/TypeSize.h"
31#include "llvm/Support/raw_ostream.h"
32#include "isl/ctx.h"
33#include "isl/schedule_node.h"
34#include "isl/schedule_type.h"
35#include "isl/union_map.h"
36#include "isl/union_set.h"
37#include <algorithm>
38#include <cassert>
39#include <cmath>
40#include <cstdint>
41#include <string>
42#include <vector>
43
45#define DEBUG_TYPE "polly-opt-isl"
46
using namespace llvm;
using namespace polly;

namespace llvm {
// Forward declaration so the interfaces below can mention llvm::Value without
// requiring the full definition.
class Value;
} // namespace llvm
53
54static cl::opt<int> LatencyVectorFma(
55 "polly-target-latency-vector-fma",
56 cl::desc("The minimal number of cycles between issuing two "
57 "dependent consecutive vector fused multiply-add "
58 "instructions."),
59 cl::Hidden, cl::init(8), cl::cat(PollyCategory));
60
61static cl::opt<int> ThroughputVectorFma(
62 "polly-target-throughput-vector-fma",
63 cl::desc("A throughput of the processor floating-point arithmetic units "
64 "expressed in the number of vector fused multiply-add "
65 "instructions per clock cycle."),
66 cl::Hidden, cl::init(1), cl::cat(PollyCategory));
67
68static cl::opt<int> FirstCacheLevelSize(
69 "polly-target-1st-cache-level-size",
70 cl::desc("The size of the first cache level specified in bytes."),
71 cl::Hidden, cl::init(-1), cl::cat(PollyCategory));
72
73static cl::opt<int> FirstCacheLevelDefaultSize(
74 "polly-target-1st-cache-level-default-size",
75 cl::desc("The default size of the first cache level specified in bytes"
76 " (if not enough were provided by the TargetTransformInfo)."),
77 cl::Hidden, cl::init(32768), cl::cat(PollyCategory));
78
79static cl::opt<int> SecondCacheLevelSize(
80 "polly-target-2nd-cache-level-size",
81 cl::desc("The size of the second level specified in bytes."), cl::Hidden,
82 cl::init(-1), cl::cat(PollyCategory));
83
84static cl::opt<int> SecondCacheLevelDefaultSize(
85 "polly-target-2nd-cache-level-default-size",
86 cl::desc("The default size of the second cache level specified in bytes"
87 " (if not enough were provided by the TargetTransformInfo)."),
88 cl::Hidden, cl::init(262144), cl::cat(PollyCategory));
89
90// This option, along with --polly-target-2nd-cache-level-associativity,
91// --polly-target-1st-cache-level-size, and --polly-target-2st-cache-level-size
92// represent the parameters of the target cache, which do not have typical
93// values that can be used by default. However, to apply the pattern matching
94// optimizations, we use the values of the parameters of Intel Core i7-3820
95// SandyBridge in case the parameters are not specified or not provided by the
96// TargetTransformInfo.
97static cl::opt<int> FirstCacheLevelAssociativity(
98 "polly-target-1st-cache-level-associativity",
99 cl::desc("The associativity of the first cache level."), cl::Hidden,
100 cl::init(-1), cl::cat(PollyCategory));
101
103 "polly-target-1st-cache-level-default-associativity",
104 cl::desc("The default associativity of the first cache level"
105 " (if not enough were provided by the TargetTransformInfo)."),
106 cl::Hidden, cl::init(8), cl::cat(PollyCategory));
107
109 "polly-target-2nd-cache-level-associativity",
110 cl::desc("The associativity of the second cache level."), cl::Hidden,
111 cl::init(-1), cl::cat(PollyCategory));
112
114 "polly-target-2nd-cache-level-default-associativity",
115 cl::desc("The default associativity of the second cache level"
116 " (if not enough were provided by the TargetTransformInfo)."),
117 cl::Hidden, cl::init(8), cl::cat(PollyCategory));
118
119static cl::opt<int> VectorRegisterBitwidth(
120 "polly-target-vector-register-bitwidth",
121 cl::desc("The size in bits of a vector register (if not set, this "
122 "information is taken from LLVM's target information."),
123 cl::Hidden, cl::init(-1), cl::cat(PollyCategory));
124
126 "polly-pattern-matching-nc-quotient",
127 cl::desc("Quotient that is obtained by dividing Nc, the parameter of the"
128 "macro-kernel, by Nr, the parameter of the micro-kernel"),
129 cl::Hidden, cl::init(256), cl::cat(PollyCategory));
130
131static cl::opt<bool>
132 PMBasedTCOpts("polly-tc-opt",
133 cl::desc("Perform optimizations of tensor contractions based "
134 "on pattern matching"),
135 cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
136
137static cl::opt<bool>
138 PMBasedMMMOpts("polly-matmul-opt",
139 cl::desc("Perform optimizations of matrix multiplications "
140 "based on pattern matching"),
141 cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));
142
143static cl::opt<int> OptComputeOut(
144 "polly-tc-dependences-computeout",
145 cl::desc("Bound the dependence analysis by a maximal amount of "
146 "computational steps (0 means no bound)"),
147 cl::Hidden, cl::init(500000), cl::ZeroOrMore, cl::cat(PollyCategory));
148
149namespace {
/// Parameters of the micro kernel.
///
/// Parameters, which determine sizes of rank-1 (i.e., outer product) update
/// used in the optimized matrix multiplication.
struct MicroKernelParamsTy {
  // Number of rows of the rank-1 update; used as the first register-tile size.
  int Mr;
  // Number of columns of the rank-1 update; used as the second register-tile
  // size.
  int Nr;
};
158
/// Parameters of the macro kernel.
///
/// Parameters, which determine sizes of blocks of partitioned matrices
/// used in the optimized matrix multiplication.
struct MacroKernelParamsTy {
  // Tile sizes for the three innermost band dimensions (in that order);
  // {1, 1, 1} means "do not create a macro kernel".
  int Mc;
  int Nc;
  int Kc;
};
168
/// Parameters of the matrix multiplication operands.
///
/// Parameters, which describe access relations that represent operands of the
/// matrix multiplication.
struct MatMulInfoTy {
  // Read access of the form A[i][k].
  MemoryAccess *A = nullptr;
  // Read access of the form B[k][j].
  MemoryAccess *B = nullptr;
  // Read access of the result matrix, C[i][j].
  MemoryAccess *ReadFromC = nullptr;
  // Write access of the result matrix, C[i][j].
  MemoryAccess *WriteToC = nullptr;
  // Schedule dimensions playing the roles of the i, j, and k loops of the
  // canonical matrix multiplication; -1 until determined by pattern matching.
  int i = -1;
  int j = -1;
  int k = -1;
};
182
/// Parameters of the tensor contraction operands.
///
/// A general d-dimensional tensor T ∈ R ^ Nu0 x ... x Nud−1 can be defined
/// as the set of scalar elements indexed by the set of indices u0 ... ud,
///
/// T ≡ {Anu0...nud−1 ∈ R | (u0,...,ud−1) ∈ Nu0 x ... x Nud−1}.
///
/// Let A, B, and C be dA, dB, and dC-dimensional tensors, respectively.
/// Let the free and the contracted indices of the tensor A be grouped into
/// two bundles I = i0...ir−1 and P = p0...pt−1, respectively. Similarly,
/// the free and the contracted indices of B are grouped into bundles
/// J = j0..js−1 and P and the free indices of C are grouped into
/// bundles I and J.
///
/// Tensor contraction (TC) of tensors A, B into tensor C can be represented as
/// C(shuffle(I,J))=∑α·A(shuffle(I,P))·B(shuffle(P,J))+β·C(shuffle(I,J)),
/// where ∑ is a summation over all contracted indices of P,
/// α, β ∈ R, Npi is the length of the tensor dimension that corresponds
/// to the index pi, A(shuffle(I, P)), B(shuffle(P, J)), C(shuffle(I, J)) are
/// accesses to tensors A, B, C, respectively,
/// shuffle(I, J), shuffle(I, P), and shuffle(P, J) are permutations of
/// the enclosed indices.
///
/// Multiplication of C(shuffle(I,J)) by β can be moved into a different SCoP
/// statement by loop distribution, which is done by the isl scheduler.
/// If β is not equal to one, the optimization of TC of Polly requires
/// such a transformation.
///
/// TCInfoTy contains parameters, which describe access relations that represent
/// operands of the tensor contraction.
struct TCInfoTy {
  /// @{
  /// Memory accesses that represent reading from tensors, which are operands of
  /// the tensor contraction.
  MemoryAccess *A = nullptr;
  MemoryAccess *B = nullptr;
  /// @}

  /// @{
  /// Memory accesses that represent reading from and writing into the tensor,
  /// which contains the result of the tensor contraction.
  MemoryAccess *ReadFromC = nullptr;
  MemoryAccess *WriteToC = nullptr;
  /// @}

  /// @{
  /// Input dimensions of the schedule space, which represent free
  /// indices of tensors.
  SmallDenseSet<int> I;
  SmallDenseSet<int> J;
  /// @}

  /// Input dimension of the schedule space, which represents contracted
  /// indices of tensors.
  SmallDenseSet<int> P;

  /// @{
  /// Sizes of tensor dimensions for corresponding input dimensions of
  /// the schedule space. The size of the tensor dimension can be larger than
  /// the size of the corresponding input dimension of the schedule space.
  /// This does not correspond to a tensor contraction. However, such a pattern
  /// will be optimized by the transformation.
  SmallVector<int> DimensionSizes;
  SmallVector<int> ADimensions;
  SmallVector<int> BDimensions;
  SmallVector<int> CDimensions;
  /// @}

  /// @{
  /// Permutations of indices of I, J, and P, which describe operands of
  /// the tensor contraction and its result.
  SmallVector<int> OrderedI;
  SmallVector<int> OrderedJ;
  SmallVector<int> OrderedP;
  /// @}
};
259
260/// Create an isl::union_set, which describes the option of the form
261/// [isolate[] -> unroll[x]].
262///
263/// @param Ctx An isl::ctx, which is used to create the isl::union_set.
264static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) {
265 isl::space Space = isl::space(Ctx, 0, 0, 1);
266 isl::map UnrollIsolatedSetOption = isl::map::universe(Space);
267 isl::id DimInId = isl::id::alloc(Ctx, "isolate", nullptr);
268 isl::id DimOutId = isl::id::alloc(Ctx, "unroll", nullptr);
269 UnrollIsolatedSetOption =
270 UnrollIsolatedSetOption.set_tuple_id(isl::dim::in, DimInId);
271 UnrollIsolatedSetOption =
272 UnrollIsolatedSetOption.set_tuple_id(isl::dim::out, DimOutId);
273 return UnrollIsolatedSetOption.wrap();
274}
275
/// Permute the two dimensions of the isl map.
///
/// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that
/// have type @p DimType.
///
/// @param Map The isl map to be modified.
/// @param DimType The type of the dimensions.
/// @param DstPos The first dimension.
/// @param SrcPos The second dimension.
/// @return The modified map.
static isl::map permuteDimensions(isl::map Map, isl::dim DimType,
                                  unsigned DstPos, unsigned SrcPos) {
  assert(DstPos < unsignedFromIslSize(Map.dim(DimType)) &&
         SrcPos < unsignedFromIslSize(Map.dim(DimType)));
  // Nothing to do when both positions coincide.
  if (DstPos == SrcPos)
    return Map;
  // Save the tuple ids of both tuples: they are restored explicitly at the
  // end after the move_dims shuffling below.
  isl::id DimId;
  if (Map.has_tuple_id(DimType))
    DimId = Map.get_tuple_id(DimType);
  // Use the opposite tuple (out for in, in for out) as scratch space for the
  // two dimensions being swapped.
  auto FreeDim = DimType == isl::dim::in ? isl::dim::out : isl::dim::in;
  isl::id FreeDimId;
  if (Map.has_tuple_id(FreeDim))
    FreeDimId = Map.get_tuple_id(FreeDim);
  auto MaxDim = std::max(DstPos, SrcPos);
  auto MinDim = std::min(DstPos, SrcPos);
  // Park both dimensions in the scratch tuple — larger index first, so the
  // smaller index remains valid — then reinsert them in swapped order.
  Map = Map.move_dims(FreeDim, 0, DimType, MaxDim, 1);
  Map = Map.move_dims(FreeDim, 0, DimType, MinDim, 1);
  Map = Map.move_dims(DimType, MinDim, FreeDim, 1, 1);
  Map = Map.move_dims(DimType, MaxDim, FreeDim, 0, 1);
  // Restore the saved tuple ids, if there were any.
  if (!DimId.is_null())
    Map = Map.set_tuple_id(DimType, DimId);
  if (!FreeDimId.is_null())
    Map = Map.set_tuple_id(FreeDim, FreeDimId);
  return Map;
}
311
312/// Check the form of the access relation.
313///
314/// Check that the access relation @p AccMap has the form M[i][j], where i
315/// is a @p FirstPos and j is a @p SecondPos.
316///
317/// @param AccMap The access relation to be checked.
318/// @param FirstPos The index of the input dimension that is mapped to
319/// the first output dimension.
320/// @param SecondPos The index of the input dimension that is mapped to the
321/// second output dimension.
322/// @return True in case @p AccMap has the expected form and false,
323/// otherwise.
324static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos,
325 int &SecondPos) {
326 isl::space Space = AccMap.get_space();
327 isl::map Universe = isl::map::universe(Space);
328
329 if (unsignedFromIslSize(Space.dim(isl::dim::out)) != 2)
330 return false;
331
332 // MatMul has the form:
333 // for (i = 0; i < N; i++)
334 // for (j = 0; j < M; j++)
335 // for (k = 0; k < P; k++)
336 // C[i, j] += A[i, k] * B[k, j]
337 //
338 // Permutation of three outer loops: 3! = 6 possibilities.
339 int FirstDims[] = {0, 0, 1, 1, 2, 2};
340 int SecondDims[] = {1, 2, 2, 0, 0, 1};
341 for (int i = 0; i < 6; i += 1) {
342 auto PossibleMatMul =
343 Universe.equate(isl::dim::in, FirstDims[i], isl::dim::out, 0)
344 .equate(isl::dim::in, SecondDims[i], isl::dim::out, 1);
345
346 AccMap = AccMap.intersect_domain(Domain);
347 PossibleMatMul = PossibleMatMul.intersect_domain(Domain);
348
349 // If AccMap spans entire domain (Non-partial write),
350 // compute FirstPos and SecondPos.
351 // If AccMap != PossibleMatMul here (the two maps have been gisted at
352 // this point), it means that the writes are not complete, or in other
353 // words, it is a Partial write and Partial writes must be rejected.
354 if (AccMap.is_equal(PossibleMatMul)) {
355 if (FirstPos != -1 && FirstPos != FirstDims[i])
356 continue;
357 FirstPos = FirstDims[i];
358 if (SecondPos != -1 && SecondPos != SecondDims[i])
359 continue;
360 SecondPos = SecondDims[i];
361 return true;
362 }
363 }
364
365 return false;
366}
367
368/// Does the memory access represent a non-scalar operand of the matrix
369/// multiplication.
370///
371/// Check that the memory access @p MemAccess is the read access to a non-scalar
372/// operand of the matrix multiplication or its result.
373///
374/// @param MemAccess The memory access to be checked.
375/// @param MMI Parameters of the matrix multiplication operands.
376/// @return True in case the memory access represents the read access
377/// to a non-scalar operand of the matrix multiplication and
378/// false, otherwise.
379static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess,
380 MatMulInfoTy &MMI) {
381 if (!MemAccess->isLatestArrayKind() || !MemAccess->isRead())
382 return false;
383 auto AccMap = MemAccess->getLatestAccessRelation();
384 isl::set StmtDomain = MemAccess->getStatement()->getDomain();
385 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.j) && !MMI.ReadFromC) {
386 MMI.ReadFromC = MemAccess;
387 return true;
388 }
389 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.i, MMI.k) && !MMI.A) {
390 MMI.A = MemAccess;
391 return true;
392 }
393 if (isMatMulOperandAcc(StmtDomain, AccMap, MMI.k, MMI.j) && !MMI.B) {
394 MMI.B = MemAccess;
395 return true;
396 }
397 return false;
398}
399
400/// Check accesses to operands of the matrix multiplication.
401///
402/// Check that accesses of the SCoP statement, which corresponds to
403/// the partial schedule @p PartialSchedule, are scalar in terms of loops
404/// containing the matrix multiplication, in case they do not represent
405/// accesses to the non-scalar operands of the matrix multiplication or
406/// its result.
407///
408/// @param PartialSchedule The partial schedule of the SCoP statement.
409/// @param MMI Parameters of the matrix multiplication operands.
410/// @return True in case the corresponding SCoP statement
411/// represents matrix multiplication and false,
412/// otherwise.
413static bool containsOnlyMatrMultAcc(isl::map PartialSchedule,
414 MatMulInfoTy &MMI) {
415 auto InputDimId = PartialSchedule.get_tuple_id(isl::dim::in);
416 auto *Stmt = static_cast<ScopStmt *>(InputDimId.get_user());
417 unsigned OutDimNum = unsignedFromIslSize(PartialSchedule.range_tuple_dim());
418 assert(OutDimNum > 2 && "In case of the matrix multiplication the loop nest "
419 "and, consequently, the corresponding scheduling "
420 "functions have at least three dimensions.");
421 auto MapI =
422 permuteDimensions(PartialSchedule, isl::dim::out, MMI.i, OutDimNum - 1);
423 auto MapJ =
424 permuteDimensions(PartialSchedule, isl::dim::out, MMI.j, OutDimNum - 1);
425 auto MapK =
426 permuteDimensions(PartialSchedule, isl::dim::out, MMI.k, OutDimNum - 1);
427
428 auto Accesses = getAccessesInOrder(*Stmt);
429 for (auto *MemA = Accesses.begin(); MemA != Accesses.end() - 1; MemA++) {
430 auto *MemAccessPtr = *MemA;
431 if (MemAccessPtr->isLatestArrayKind() && MemAccessPtr != MMI.WriteToC &&
432 !isMatMulNonScalarReadAccess(MemAccessPtr, MMI) &&
433 !(MemAccessPtr->isStrideZero(MapI) &&
434 MemAccessPtr->isStrideZero(MapJ) && MemAccessPtr->isStrideZero(MapK)))
435 return false;
436 }
437 return true;
438}
439
440/// Check for dependencies corresponding to the matrix multiplication.
441///
442/// Check that there is only true dependence of the form
443/// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement
444/// represented by @p Schedule and k is @p Pos. Such a dependence corresponds
445/// to the dependency produced by the matrix multiplication.
446///
447/// @param Schedule The schedule of the SCoP statement.
448/// @param D The SCoP dependencies.
449/// @param Pos The parameter to describe an acceptable true dependence.
450/// In case it has a negative value, try to determine its
451/// acceptable value.
452/// @return True in case dependencies correspond to the matrix multiplication
453/// and false, otherwise.
454static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D,
455 int &Pos) {
456 isl::union_map Dep = D->getDependences(Dependences::TYPE_RAW);
457 isl::union_map Red = D->getDependences(Dependences::TYPE_RED);
458 if (!Red.is_null())
459 Dep = Dep.unite(Red);
460 auto DomainSpace = Schedule.get_space().domain();
461 auto Space = DomainSpace.map_from_domain_and_range(DomainSpace);
462 auto Deltas = Dep.extract_map(Space).deltas();
463 int DeltasDimNum = unsignedFromIslSize(Deltas.dim(isl::dim::set));
464 for (int i = 0; i < DeltasDimNum; i++) {
465 auto Val = Deltas.plain_get_val_if_fixed(isl::dim::set, i);
466 Pos = Pos < 0 && Val.is_one() ? i : Pos;
467 if (Val.is_nan() || !(Val.is_zero() || (i == Pos && Val.is_one())))
468 return false;
469 }
470 if (DeltasDimNum == 0 || Pos < 0)
471 return false;
472 return true;
473}
474
/// Check if the SCoP statement could probably be optimized with analytical
/// modeling.
///
/// containsMatrMult tries to determine whether the following conditions
/// are true:
/// 1. The last memory access modeling an array, MA1, represents writing to
///    memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or
///    S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement
///    under consideration.
/// 2. There is only one loop-carried true dependency, and it has the
///    form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no
///    loop-carried or anti dependencies.
/// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent
///    reading from memory and have the form S(..., i3, ...) -> M(i1, i3),
///    S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively,
///    and all memory accesses of the SCoP that are different from MA1, MA2,
///    MA3, and MA4 have stride 0, if the innermost loop is exchanged with any
///    of loops i1, i2 and i3.
///
/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
///        to check.
/// @param D The SCoP dependencies.
/// @param MMI Parameters of the matrix multiplication operands.
static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D,
                             MatMulInfoTy &MMI) {
  auto InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
  auto *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  // A matrix multiplication statement needs more than one memory access.
  if (Stmt->size() <= 1)
    return false;

  // Check condition 1: walk the accesses backwards; the last array-kind
  // access must be a write of the form C[i][j]. Scalar accesses at the tail
  // are skipped. Note the loop stops before Accesses.begin(), so the first
  // access is never inspected here.
  auto Accesses = getAccessesInOrder(*Stmt);
  for (auto *MemA = Accesses.end() - 1; MemA != Accesses.begin(); MemA--) {
    auto *MemAccessPtr = *MemA;
    if (!MemAccessPtr->isLatestArrayKind())
      continue;
    if (!MemAccessPtr->isWrite())
      return false;
    auto AccMap = MemAccessPtr->getLatestAccessRelation();
    // isMatMulOperandAcc also rejects partial writes and fixes MMI.i/MMI.j.
    if (!isMatMulOperandAcc(Stmt->getDomain(), AccMap, MMI.i, MMI.j))
      return false;
    MMI.WriteToC = MemAccessPtr;
    break;
  }

  // Check condition 2: the only loop-carried dependence is the reduction over
  // k; this also determines MMI.k.
  if (!containsOnlyMatMulDep(PartialSchedule, D, MMI.k))
    return false;

  // Check condition 3: all remaining accesses are operands A, B, C or scalar
  // with respect to the matmul loops.
  if (!MMI.WriteToC || !containsOnlyMatrMultAcc(PartialSchedule, MMI))
    return false;

  // All three read operands must have been identified.
  if (!MMI.A || !MMI.B || !MMI.ReadFromC)
    return false;
  return true;
}
529
530/// Permute two dimensions of the band node.
531///
532/// Permute FirstDim and SecondDim dimensions of the Node.
533///
534/// @param Node The band node to be modified.
535/// @param FirstDim The first dimension to be permuted.
536/// @param SecondDim The second dimension to be permuted.
537static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node,
538 unsigned FirstDim,
539 unsigned SecondDim) {
541 (unsigned)isl_schedule_node_band_n_member(Node.get()) >
542 std::max(FirstDim, SecondDim));
543 auto PartialSchedule =
545 auto PartialScheduleFirstDim = PartialSchedule.at(FirstDim);
546 auto PartialScheduleSecondDim = PartialSchedule.at(SecondDim);
547 PartialSchedule =
548 PartialSchedule.set_union_pw_aff(SecondDim, PartialScheduleFirstDim);
549 PartialSchedule =
550 PartialSchedule.set_union_pw_aff(FirstDim, PartialScheduleSecondDim);
552 return Node.insert_partial_schedule(PartialSchedule);
553}
554
555static isl::schedule_node
556createMicroKernel(isl::schedule_node Node,
557 MicroKernelParamsTy MicroKernelParams) {
558 Node = applyRegisterTiling(Node, {MicroKernelParams.Mr, MicroKernelParams.Nr},
559 1);
560 Node = Node.parent().parent();
561 return permuteBandNodeDimensions(Node, 0, 1).child(0).child(0);
562}
563
564/// Create the BLIS macro-kernel.
565///
566/// We create the BLIS macro-kernel by applying a combination of tiling
567/// of dimensions of the band node and interchanging of two innermost
568/// modified dimensions. The values of MacroKernelParams's fields are used
569/// as tile sizes.
570///
571/// @param Node The schedule node to be modified.
572/// @param MacroKernelParams Parameters of the macro kernel
573/// to be used as tile sizes.
574static isl::schedule_node
575createMacroKernel(isl::schedule_node Node,
576 MacroKernelParamsTy MacroKernelParams) {
578 if (MacroKernelParams.Mc == 1 && MacroKernelParams.Nc == 1 &&
579 MacroKernelParams.Kc == 1)
580 return Node;
581 int DimOutNum = isl_schedule_node_band_n_member(Node.get());
582 std::vector<int> TileSizes(DimOutNum, 1);
583 TileSizes[DimOutNum - 3] = MacroKernelParams.Mc;
584 TileSizes[DimOutNum - 2] = MacroKernelParams.Nc;
585 TileSizes[DimOutNum - 1] = MacroKernelParams.Kc;
586 Node = tileNode(Node, "1st level tiling", TileSizes, 1);
587 Node = Node.parent().parent();
588 Node = permuteBandNodeDimensions(Node, DimOutNum - 2, DimOutNum - 1);
589 Node = permuteBandNodeDimensions(Node, DimOutNum - 3, DimOutNum - 1);
590
591 return Node.child(0).child(0);
592}
593
594/// Get the size of the widest type of the matrix multiplication operands
595/// in bytes, including alignment padding.
596///
597/// @param MMI Parameters of the matrix multiplication operands.
598/// @return The size of the widest type of the matrix multiplication operands
599/// in bytes, including alignment padding.
600static uint64_t getMatMulAlignTypeSize(const MatMulInfoTy &MMI) {
601 auto *S = MMI.A->getStatement()->getParent();
602 auto &DL = S->getFunction().getParent()->getDataLayout();
603 auto ElementSizeA = DL.getTypeAllocSize(MMI.A->getElementType());
604 auto ElementSizeB = DL.getTypeAllocSize(MMI.B->getElementType());
605 auto ElementSizeC = DL.getTypeAllocSize(MMI.WriteToC->getElementType());
606 return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
607}
608
609/// Get the size of the widest type of the matrix multiplication operands
610/// in bits.
611///
612/// @param MMI Parameters of the matrix multiplication operands.
613/// @return The size of the widest type of the matrix multiplication operands
614/// in bits.
615static uint64_t getMatMulTypeSize(const MatMulInfoTy &MMI) {
616 auto *S = MMI.A->getStatement()->getParent();
617 auto &DL = S->getFunction().getParent()->getDataLayout();
618 auto ElementSizeA = DL.getTypeSizeInBits(MMI.A->getElementType());
619 auto ElementSizeB = DL.getTypeSizeInBits(MMI.B->getElementType());
620 auto ElementSizeC = DL.getTypeSizeInBits(MMI.WriteToC->getElementType());
621 return std::max({ElementSizeA, ElementSizeB, ElementSizeC});
622}
623
/// Get parameters of the BLIS micro kernel.
///
/// We choose the Mr and Nr parameters of the micro kernel to be large enough
/// such that no stalls caused by the combination of latencies and dependencies
/// are introduced during the updates of the resulting matrix of the matrix
/// multiplication. However, they should also be as small as possible to
/// release more registers for entries of multiplied matrices.
///
/// @param TTI Target Transform Info.
/// @param MMI Parameters of the matrix multiplication operands.
/// @return The structure of type MicroKernelParamsTy.
/// @see MicroKernelParamsTy
static MicroKernelParamsTy getMicroKernelParams(const TargetTransformInfo *TTI,
                                                const MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");

  // Nvec - Number of double-precision floating-point numbers that can be hold
  // by a vector register. Use 2 by default.
  long RegisterBitwidth = VectorRegisterBitwidth;

  // A command-line value of -1 means "ask the target".
  if (RegisterBitwidth == -1)
    RegisterBitwidth =
        TTI->getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector);
  auto ElementSize = getMatMulTypeSize(MMI);
  assert(ElementSize > 0 && "The element size of the matrix multiplication "
                            "operands should be greater than zero.");
  auto Nvec = RegisterBitwidth / ElementSize;
  if (Nvec == 0)
    Nvec = 2;
  // Nr: round sqrt(Nvec * Latency * Throughput) up to a multiple of Nvec, so
  // a row of the rank-1 update fills whole vector registers.
  int Nr = ceil(sqrt((double)(Nvec * LatencyVectorFma * ThroughputVectorFma)) /
                Nvec) *
           Nvec;
  // NOTE(review): Nvec * LatencyVectorFma * ThroughputVectorFma / Nr is
  // evaluated in integer arithmetic BEFORE the cast to double, so ceil() sees
  // an already-truncated quotient. Possibly intentional — confirm before
  // changing.
  int Mr = ceil((double)(Nvec * LatencyVectorFma * ThroughputVectorFma / Nr));
  return {Mr, Nr};
}
659
660/// Determine parameters of the target cache.
661///
662/// @param TTI Target Transform Info.
663static void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) {
664 auto L1DCache = llvm::TargetTransformInfo::CacheLevel::L1D;
665 auto L2DCache = llvm::TargetTransformInfo::CacheLevel::L2D;
666 if (FirstCacheLevelSize == -1) {
667 if (TTI->getCacheSize(L1DCache))
668 FirstCacheLevelSize = TTI->getCacheSize(L1DCache).value();
669 else
671 }
672 if (SecondCacheLevelSize == -1) {
673 if (TTI->getCacheSize(L2DCache))
674 SecondCacheLevelSize = TTI->getCacheSize(L2DCache).value();
675 else
677 }
679 if (TTI->getCacheAssociativity(L1DCache))
681 TTI->getCacheAssociativity(L1DCache).value();
682 else
684 static_cast<int>(FirstCacheLevelDefaultAssociativity);
685 }
687 if (TTI->getCacheAssociativity(L2DCache))
689 TTI->getCacheAssociativity(L2DCache).value();
690 else
692 static_cast<int>(SecondCacheLevelDefaultAssociativity);
693 }
694}
695
696/// Get parameters of the BLIS macro kernel.
697///
698/// During the computation of matrix multiplication, blocks of partitioned
699/// matrices are mapped to different layers of the memory hierarchy.
700/// To optimize data reuse, blocks should be ideally kept in cache between
701/// iterations. Since parameters of the macro kernel determine sizes of these
702/// blocks, there are upper and lower bounds on these parameters.
703///
704/// @param TTI Target Transform Info.
705/// @param MicroKernelParams Parameters of the micro-kernel
706/// to be taken into account.
707/// @param MMI Parameters of the matrix multiplication operands.
708/// @return The structure of type MacroKernelParamsTy.
709/// @see MacroKernelParamsTy
710/// @see MicroKernelParamsTy
711static MacroKernelParamsTy
712getMacroKernelParams(const llvm::TargetTransformInfo *TTI,
713 const MicroKernelParamsTy &MicroKernelParams,
714 const MatMulInfoTy &MMI) {
715 getTargetCacheParameters(TTI);
716 // According to www.cs.utexas.edu/users/flame/pubs/TOMS-BLIS-Analytical.pdf,
717 // it requires information about the first two levels of a cache to determine
718 // all the parameters of a macro-kernel. It also checks that an associativity
719 // degree of a cache level is greater than two. Otherwise, another algorithm
720 // for determination of the parameters should be used.
721 if (!(MicroKernelParams.Mr > 0 && MicroKernelParams.Nr > 0 &&
724 return {1, 1, 1};
725 // The quotient should be greater than zero.
727 return {1, 1, 1};
728 int Car = floor(
730 (1 + static_cast<double>(MicroKernelParams.Nr) / MicroKernelParams.Mr));
731
732 // Car can be computed to be zero since it is floor to int.
733 // On Mac OS, division by 0 does not raise a signal. This causes negative
734 // tile sizes to be computed. Prevent division by Cac==0 by early returning
735 // if this happens.
736 if (Car == 0)
737 return {1, 1, 1};
738
739 auto ElementSize = getMatMulAlignTypeSize(MMI);
740 assert(ElementSize > 0 && "The element size of the matrix multiplication "
741 "operands should be greater than zero.");
742 int Kc = (Car * FirstCacheLevelSize) /
743 (MicroKernelParams.Mr * FirstCacheLevelAssociativity * ElementSize);
744 double Cac =
745 static_cast<double>(Kc * ElementSize * SecondCacheLevelAssociativity) /
747 int Mc = floor((SecondCacheLevelAssociativity - 2) / Cac);
748 int Nc = PollyPatternMatchingNcQuotient * MicroKernelParams.Nr;
749
750 assert(Mc > 0 && Nc > 0 && Kc > 0 &&
751 "Matrix block sizes should be greater than zero");
752 return {Mc, Nc, Kc};
753}
754
755/// Create an access relation that is specific to
756/// the matrix multiplication pattern.
757///
758/// Create an access relation of the following form:
759/// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ]
760/// where I is @p FirstDim, J is @p SecondDim.
761///
762/// It can be used, for example, to create relations that helps to consequently
763/// access elements of operands of a matrix multiplication after creation of
764/// the BLIS micro and macro kernels.
765///
766/// @see ScheduleTreeOptimizer::createMicroKernel
767/// @see ScheduleTreeOptimizer::createMacroKernel
768///
769/// Subsequently, the described access relation is applied to the range of
770/// @p MapOldIndVar, that is used to map original induction variables to
771/// the ones, which are produced by schedule transformations. It helps to
772/// define relations using a new space and, at the same time, keep them
773/// in the original one.
774///
775/// @param MapOldIndVar The relation, which maps original induction variables
776/// to the ones, which are produced by schedule
777/// transformations.
778/// @param FirstDim, SecondDim The input dimensions that are used to define
779/// the specified access relation.
780/// @return The specified access relation.
781static isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim,
782 unsigned SecondDim) {
783 auto AccessRelSpace = isl::space(MapOldIndVar.ctx(), 0, 9, 3);
784 auto AccessRel = isl::map::universe(AccessRelSpace);
785 AccessRel = AccessRel.equate(isl::dim::in, FirstDim, isl::dim::out, 0);
786 AccessRel = AccessRel.equate(isl::dim::in, 5, isl::dim::out, 1);
787 AccessRel = AccessRel.equate(isl::dim::in, SecondDim, isl::dim::out, 2);
788 return MapOldIndVar.apply_range(AccessRel);
789}
790
791static isl::schedule_node createExtensionNode(isl::schedule_node Node,
792 isl::map ExtensionMap) {
793 auto Extension = isl::union_map(ExtensionMap);
794 auto NewNode = isl::schedule_node::from_extension(Extension);
795 return Node.graft_before(NewNode);
796}
797
798static isl::schedule_node optimizePackedB(isl::schedule_node Node,
799 ScopStmt *Stmt, isl::map MapOldIndVar,
800 MicroKernelParamsTy MicroParams,
801 MacroKernelParamsTy MacroParams,
802 MatMulInfoTy &MMI) {
803 Scop *S = Stmt->getParent();
804 isl::set Domain = Stmt->getDomain();
805
806 // Create packed array.
807 unsigned FirstDimSize = MacroParams.Nc / MicroParams.Nr;
808 unsigned SecondDimSize = MacroParams.Kc;
809 unsigned ThirdDimSize = MicroParams.Nr;
810 ScopArrayInfo *PackedB =
811 S->createScopArrayInfo(MMI.B->getElementType(), "Packed_B",
812 {FirstDimSize, SecondDimSize, ThirdDimSize});
813
814 // Compute the access relation for copying from B to PackedB.
815 isl::map AccRelB = MMI.B->getLatestAccessRelation();
816 isl::map AccRelPackedB = getMatMulAccRel(MapOldIndVar, 3, 7);
817 AccRelPackedB =
818 AccRelPackedB.set_tuple_id(isl::dim::out, PackedB->getBasePtrId());
819
820 // Create the copy statement and redirect access.
821 ScopStmt *CopyStmt = S->addScopStmt(AccRelB, AccRelPackedB, Domain);
822 MMI.B->setNewAccessRelation(AccRelPackedB);
823
824 unsigned Dim = unsignedFromIslSize(MapOldIndVar.range_tuple_dim());
825 assert(Dim >= 2);
826 // Insert into the schedule tree.
827 isl::map ExtMap = MapOldIndVar.project_out(isl::dim::out, 2, Dim - 2);
828 ExtMap = ExtMap.reverse();
829 ExtMap = ExtMap.fix_si(isl::dim::out, MMI.i, 0);
830 ExtMap = ExtMap.intersect_range(Domain);
831 ExtMap = ExtMap.set_tuple_id(isl::dim::out, CopyStmt->getDomainId());
832 return createExtensionNode(Node, ExtMap);
833}
834
/// Pack the A operand of the matrix multiplication.
///
/// Creates the packed array Packed_A[Mc/Mr][Kc][Mr], adds a copy statement
/// that fills it from A, redirects A's access relation to the packed array,
/// and grafts an extension node executing the copy into the schedule tree.
static isl::schedule_node optimizePackedA(isl::schedule_node Node, ScopStmt *,
                                          isl::map MapOldIndVar,
                                          MicroKernelParamsTy MicroParams,
                                          MacroKernelParamsTy MacroParams,
                                          MatMulInfoTy &MMI) {
  // The statement is recovered from the user pointer of the input tuple id.
  isl::id InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  ScopStmt *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
  isl::set Domain = Stmt->getDomain();
  // NOTE(review): DomainId is not used below — confirm whether it can be
  // removed.
  isl::id DomainId = Domain.get_tuple_id();

  // Create the packed array.
  unsigned FirstDimSize = MacroParams.Mc / MicroParams.Mr;
  unsigned SecondDimSize = MacroParams.Kc;
  unsigned ThirdDimSize = MicroParams.Mr;
  ScopArrayInfo *PackedA = Stmt->getParent()->createScopArrayInfo(
      MMI.A->getElementType(), "Packed_A",
      {FirstDimSize, SecondDimSize, ThirdDimSize});

  // Compute the access relation for copying from A to PackedA.
  isl::map AccRelA = MMI.A->getLatestAccessRelation();
  isl::map AccRelPackedA = getMatMulAccRel(MapOldIndVar, 4, 6);
  AccRelPackedA =
      AccRelPackedA.set_tuple_id(isl::dim::out, PackedA->getBasePtrId());
  // { MemrefA[] -> PackedA[] }
  isl::map PackedATranslator = AccRelPackedA.apply_domain(AccRelA);

  // Compute the domain for the copy statement.
  // Construct the copy statement domain out of the 3 outermost scatter
  // dimensions (to match the 3 band nodes surrounding the extension node) and
  // the array elements to copy (one statement instance per array element).
  // { Scatter[] }
  isl::set ScatterDomain = MapOldIndVar.intersect_domain(Domain).range();
  // { Scatter[] -> OutermostScatter[] }
  isl::map OuterDomainMap =
      makeIdentityMap(ScatterDomain, true).project_out(isl::dim::out, 3, 6);
  // { Scatter[] -> MemrefA[] }
  isl::map CopyFrom = MapOldIndVar.reverse().apply_range(AccRelA);
  // { Scatter[] -> CopyStmt[] }
  isl::map DomainTranslator = OuterDomainMap.range_product(CopyFrom);
  // { CopyStmt[] }
  isl::set CopyDomain = DomainTranslator.range();

  // Translate the access relations to the new domain.
  // { CopyStmt[] -> MemrefA[] }
  CopyFrom = CopyFrom.apply_domain(DomainTranslator);
  // { CopyStmt[] -> PackedA[] }
  isl::map CopyTo = CopyFrom.apply_range(PackedATranslator);

  // Create the copy statement and redirect access.
  ScopStmt *CopyStmt =
      Stmt->getParent()->addScopStmt(CopyFrom, CopyTo, CopyDomain);
  MMI.A->setNewAccessRelation(AccRelPackedA);

  // Insert into the schedule tree.
  // { Scatter[] -> CopyStmt[] }
  isl::map ExtScatterCopy = makeIdentityMap(CopyStmt->getDomain(), true);
  ExtScatterCopy = ExtScatterCopy.project_out(isl::dim::in, 3, 2);
  return createExtensionNode(Node, ExtScatterCopy);
}
894
895/// Apply the packing transformation.
896///
897/// The packing transformation can be described as a data-layout
898/// transformation that requires to introduce a new array, copy data
899/// to the array, and change memory access locations to reference the array.
900/// It can be used to ensure that elements of the new array are read in-stride
901/// access, aligned to cache lines boundaries, and preloaded into certain cache
902/// levels.
903///
904/// As an example let us consider the packing of the array A that would help
905/// to read its elements with in-stride access. An access to the array A
906/// is represented by an access relation that has the form
907/// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has
908/// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr),
909/// k mod Kc, j mod Nr, i mod Mr].
910///
911/// To ensure that elements of the array A are read in-stride access, we add
912/// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using
913/// Scop::createScopArrayInfo, change the access relation
914/// S[i, j, k] -> A[i, k] to
915/// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using
916/// MemoryAccess::setNewAccessRelation, and copy the data to the array, using
917/// the copy statement created by Scop::addScopStmt.
918///
919/// @param Node The schedule node to be optimized.
920/// @param MapOldIndVar The relation, which maps original induction variables
921/// to the ones, which are produced by schedule
922/// transformations.
923/// @param MicroParams, MacroParams Parameters of the BLIS kernel
924/// to be taken into account.
925/// @param MMI Parameters of the matrix multiplication operands.
926/// @return The optimized schedule node.
static isl::schedule_node
optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar,
                                 MicroKernelParamsTy MicroParams,
                                 MacroKernelParamsTy MacroParams,
                                 MatMulInfoTy &MMI) {
  // Recover the SCoP statement from the user pointer of the input tuple id.
  isl::id InputDimsId = MapOldIndVar.get_tuple_id(isl::dim::in);
  ScopStmt *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());

  // Ascend six levels in the schedule tree before inserting the packing
  // copy statements.
  Node = Node.parent().parent().parent().parent().parent().parent();

  // Graft the copy into Packed_B, then the copy into Packed_A, each one
  // level further down.
  Node = Node.child(0);
  Node =
      optimizePackedB(Node, Stmt, MapOldIndVar, MicroParams, MacroParams, MMI);

  Node = Node.child(0);
  Node =
      optimizePackedA(Node, Stmt, MapOldIndVar, MicroParams, MacroParams, MMI);

  // Descend five levels to return a node at the appropriate depth for the
  // caller.
  return Node.child(0).child(0).child(0).child(0).child(0);
}
948
949/// Get a relation mapping induction variables produced by schedule
950/// transformations to the original ones.
951///
952/// @param Node The schedule node produced as the result of creation
953/// of the BLIS kernels.
954/// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel
955/// to be taken into account.
956/// @return The relation mapping original induction variables to the ones
957/// produced by schedule transformation.
958/// @see ScheduleTreeOptimizer::createMicroKernel
959/// @see ScheduleTreeOptimizer::createMacroKernel
960/// @see getMacroKernelParams
961static isl::map
962getInductionVariablesSubstitution(isl::schedule_node Node,
963 MicroKernelParamsTy MicroKernelParams,
964 MacroKernelParamsTy MacroKernelParams) {
965 auto Child = Node.child(0);
966 auto UnMapOldIndVar = Child.get_prefix_schedule_union_map();
967 auto MapOldIndVar = isl::map::from_union_map(UnMapOldIndVar);
968 unsigned Dim = unsignedFromIslSize(MapOldIndVar.range_tuple_dim());
969 if (Dim > 9u)
970 return MapOldIndVar.project_out(isl::dim::out, 0, Dim - 9);
971 return MapOldIndVar;
972}
973
974/// Isolate a set of partial tile prefixes and unroll the isolated part.
975///
976/// The set should ensure that it contains only partial tile prefixes that have
977/// exactly Mr x Nr iterations of the two innermost loops produced by
978/// the optimization of the matrix multiplication. Mr and Nr are parameters of
979/// the micro-kernel.
980///
981/// In case of parametric bounds, this helps to auto-vectorize the unrolled
982/// innermost loops, using the SLP vectorizer.
983///
984/// @param Node The schedule node to be modified.
985/// @param MicroKernelParams Parameters of the micro-kernel
986/// to be taken into account.
987/// @return The modified isl_schedule_node.
static isl::schedule_node
isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node,
                                 MicroKernelParamsTy MicroKernelParams) {
  // Compute the range of the prefix schedule relation of the child node and
  // drop its innermost dimension.
  isl::schedule_node Child = Node.child(0);
  isl::union_map UnMapOldIndVar = Child.get_prefix_schedule_relation();
  isl::set Prefix = isl::map::from_union_map(UnMapOldIndVar).range();
  unsigned Dims = unsignedFromIslSize(Prefix.tuple_dim());
  assert(Dims >= 1);
  Prefix = Prefix.project_out(isl::dim::set, Dims - 1, 1);
  // Restrict to prefixes of full Nr x Mr partial tiles of the micro-kernel.
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Nr);
  Prefix = getPartialTilePrefixes(Prefix, MicroKernelParams.Mr);

  // Request unrolling of the isolated set on this band.
  isl::union_set IsolateOption =
  isl::ctx Ctx = Node.ctx();
  auto Options = IsolateOption.unite(getDimOptions(Ctx, "unroll"));
  Options = Options.unite(getUnrollIsolatedSetOptions(Ctx));
  Node = Node.as<isl::schedule_node_band>().set_ast_build_options(Options);
  // On the band three levels up, isolate the partial tile prefixes and
  // generate separate code for them.
  Node = Node.parent().parent().parent();
  IsolateOption = getIsolateOptions(Prefix, 3);
  Options = IsolateOption.unite(getDimOptions(Ctx, "separate"));
  Node = Node.as<isl::schedule_node_band>().set_ast_build_options(Options);
  // Return to the original position in the tree.
  Node = Node.child(0).child(0).child(0);
  return Node;
}
1013
1014/// Insert "Loop Vectorizer Disabled" mark node.
1015///
1016/// @param Node The child of the mark node to be inserted.
1017/// @return The modified isl_schedule_node.
1018static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) {
1019 auto Id = isl::id::alloc(Node.ctx(), "Loop Vectorizer Disabled", nullptr);
1020 return Node.insert_mark(Id).child(0);
1021}
1022
1023/// Restore the initial ordering of dimensions of the band node
1024///
1025/// In case the band node represents all the dimensions of the iteration
1026/// domain, recreate the band node to restore the initial ordering of the
1027/// dimensions.
1028///
1029/// @param Node The band node to be modified.
1030/// @return The modified schedule node.
static isl::schedule_node
getBandNodeWithOriginDimOrder(isl::schedule_node Node) {
    return Node;
  auto Domain = Node.get_universe_domain();
  // The universe domain is expected to consist of exactly one set.
  assert(isl_union_set_n_set(Domain.get()) == 1);
  // Only rebuild the schedule when the band is at schedule depth zero and
  // its members cover all dimensions of the iteration domain.
  if (Node.get_schedule_depth().release() != 0 ||
      (unsignedFromIslSize(isl::set(Domain).tuple_dim()) !=
       unsignedFromIslSize(Node.as<isl::schedule_node_band>().n_member())))
    return Node;
  // Insert a partial schedule that is the identity over the domain, which
  // restores the original ordering of the dimensions.
  auto PartialSchedulePwAff = Domain.identity_union_pw_multi_aff();
  auto PartialScheduleMultiPwAff =
      isl::multi_union_pw_aff(PartialSchedulePwAff);
  PartialScheduleMultiPwAff =
      PartialScheduleMultiPwAff.reset_tuple_id(isl::dim::set);
  return Node.insert_partial_schedule(PartialScheduleMultiPwAff);
}
1050
/// Optimize @p Node, which matches the matrix multiplication pattern.
///
/// Permute the band dimensions so that the loops i, j, k of the
/// multiplication become the three innermost ones, create the BLIS macro and
/// micro kernels and, unless one of the macro-kernel block sizes degenerates
/// to 1, isolate/unroll the innermost loops and apply the packing
/// data-layout transformation.
///
/// @param Node The schedule node to be optimized.
/// @param TTI  Target transform information (must be non-null).
/// @param MMI  Parameters of the matrix multiplication operands.
/// @return The optimized schedule node.
static isl::schedule_node optimizeMatMulPattern(isl::schedule_node Node,
                                                const TargetTransformInfo *TTI,
                                                MatMulInfoTy &MMI) {
  assert(TTI && "The target transform info should be provided.");
  int DimOutNum = isl_schedule_node_band_n_member(Node.get());
  assert(DimOutNum > 2 && "In case of the matrix multiplication the loop nest "
                          "and, consequently, the corresponding scheduling "
                          "functions have at least three dimensions.");
  Node = getBandNodeWithOriginDimOrder(Node);
  // Move i, j, k into the three innermost band positions. After each
  // permutation, track where j and k ended up if they were swapped away.
  Node = permuteBandNodeDimensions(Node, MMI.i, DimOutNum - 3);
  int NewJ = MMI.j == DimOutNum - 3 ? MMI.i : MMI.j;
  int NewK = MMI.k == DimOutNum - 3 ? MMI.i : MMI.k;
  Node = permuteBandNodeDimensions(Node, NewJ, DimOutNum - 2);
  NewK = NewK == DimOutNum - 2 ? NewJ : NewK;
  Node = permuteBandNodeDimensions(Node, NewK, DimOutNum - 1);
  auto MicroKernelParams = getMicroKernelParams(TTI, MMI);
  auto MacroKernelParams = getMacroKernelParams(TTI, MicroKernelParams, MMI);
  Node = createMacroKernel(Node, MacroKernelParams);
  Node = createMicroKernel(Node, MicroKernelParams);
  // A degenerate macro-kernel block size means no further transformation.
  if (MacroKernelParams.Mc == 1 || MacroKernelParams.Nc == 1 ||
      MacroKernelParams.Kc == 1)
    return Node;
  auto MapOldIndVar = getInductionVariablesSubstitution(Node, MicroKernelParams,
                                                        MacroKernelParams);
  if (MapOldIndVar.is_null())
    return Node;
  Node = markLoopVectorizerDisabled(Node.parent()).child(0);
  Node = isolateAndUnrollMatMulInnerLoops(Node, MicroKernelParams);
  return optimizeDataLayoutMatrMulPattern(Node, MapOldIndVar, MicroKernelParams,
                                          MacroKernelParams, MMI);
}
1082
1083/// Check if this node contains a partial schedule that could
1084/// probably be optimized with analytical modeling.
1085///
1086/// isMatrMultPattern tries to determine whether the following conditions
1087/// are true:
1088/// 1. the partial schedule contains only one statement.
1089/// 2. there are exactly three input dimensions.
1090/// 3. all memory accesses of the statement will have stride 0 or 1, if we
1091/// interchange loops (switch the variable used in the inner loop to
1092/// the outer loop).
1093/// 4. all memory accesses of the statement except from the last one, are
1094/// read memory access and the last one is write memory access.
1095/// 5. all subscripts of the last memory access of the statement don't
1096/// contain the variable used in the inner loop.
1097/// If this is the case, we could try to use an approach that is similar to
1098/// the one used to get close-to-peak performance of matrix multiplications.
1099///
1100/// @param Node The node to check.
1101/// @param D The SCoP dependencies.
1102/// @param MMI Parameters of the matrix multiplication operands.
static bool isMatrMultPattern(isl::schedule_node Node, const Dependences *D,
                              MatMulInfoTy &MMI) {
  auto PartialSchedule = isl::manage(
  // Reject bands with fewer than three members, bands that are not at
  // schedule depth zero, and partial schedules covering more than one
  // statement.
  if (isl_schedule_node_band_n_member(Node.get()) < 3 ||
      Node.get_schedule_depth().release() != 0 ||
      isl_union_map_n_map(PartialSchedule.get()) != 1)
    return false;
  // Delegate the actual pattern match to containsMatrMult, which also fills
  // in MMI on success.
  auto NewPartialSchedule = isl::map::from_union_map(PartialSchedule);
  if (containsMatrMult(NewPartialSchedule, D, MMI))
    return true;
  return false;
}
1116
1117/// Get the dimension size.
1118///
1119/// Return the size of the dimension @p Pos, which is obtained from @p SAI.
1120/// Return -1 in the case of the first dimension of a multi-dimensional array,
1121/// since the ScopArrayInfo class does not carry size information.
1122///
1123/// @param SAI The information about the array.
1124/// @param Pos The position of the dimension.
1125/// @return The size of the dimension.
1126static int getDimSize(const ScopArrayInfo *SAI, unsigned Pos) {
1127 if (Pos == 0)
1128 return -1;
1129 const llvm::SCEV *SCEVDimSize = SAI->getDimensionSize(Pos);
1130 assert(SCEVDimSize);
1131 auto *ConstantDimSize = dyn_cast<const SCEVConstant>(SCEVDimSize);
1132 assert(ConstantDimSize);
1133 auto *IntDimSize = dyn_cast<ConstantInt>(ConstantDimSize->getValue());
1134 assert(IntDimSize);
1135 return IntDimSize->getSExtValue();
1136}
1137
1138/// Check whether the access relation has the specified form.
1139///
1140/// Check that the access relation @p AccMap has the form T[I0, …, In], where
1141/// indexes I0, …, In are specified by @p Dimensions.
1142///
1143/// @param Domain The domain of the access relation.
1144/// @param AccMap The access relation to be checked.
1145/// @param Dimensions The permutation of the subset of the input dimensions.
1146/// @return True if @p AccMap has the expected form and false,
1147/// otherwise.
1148static bool isCorrectAccessMap(isl::set Domain, isl::map AccMap,
1149 ArrayRef<int> Dimensions) {
1150 isl::space Space = AccMap.get_space();
1151 if (unsignedFromIslSize(Space.dim(isl::dim::out)) != Dimensions.size())
1152 return false;
1153
1154 // Create an access relation of the following form:
1155 // [I0, …, Im] -> [Il, …, In], where indexes
1156 // Il, …, In are specified by @p Dimensions.
1157 isl::map PossibleTensor = isl::map::universe(Space);
1158 unsigned DimInSize = unsignedFromIslSize(Space.dim(isl::dim::in));
1159 for (unsigned i = 0; i < Dimensions.size(); i++) {
1160 const int InPos = Dimensions[i];
1161 if ((InPos >= static_cast<int>(DimInSize)) || (InPos < 0))
1162 return false;
1163 PossibleTensor =
1164 PossibleTensor.equate(isl::dim::in, InPos, isl::dim::out, i);
1165 }
1166
1167 AccMap = AccMap.intersect_domain(Domain);
1168 PossibleTensor = PossibleTensor.intersect_domain(Domain);
1169
1170 // If AccMap != PossibleTensor here (the two maps have been gisted at
1171 // this point), it means that the writes are not complete, or in other
1172 // words, it is a Partial write and Partial writes must be rejected.
1173 return AccMap.is_equal(PossibleTensor);
1174}
1175
1176/// Check whether the access represents the tensor contraction operand.
1177///
1178/// Check that the access relation @p AccMap has the form T[i1, …, in].
1179/// Obtained indexes i1, …, in, their sizes and their permutation are stored
1180/// into @p IndexSet, @p DimensionSizes, and @p Dimensions, respectively.
1181///
1182/// @param Domain The domain of the access relation.
1183/// @param AccMap The access relation to be checked.
1184/// @param IndexSet The subset of the input dimensions.
1185/// @param DimensionSizes Sizes of the input dimensions of @p Dimensions.
1186/// @param Dimensions The permutation of the subset of the input dimensions.
1187/// @return True if @p AccMap has the expected form and false,
1188/// otherwise.
static bool isTCOperandAcc(isl::set Domain, isl::map AccMap,
                           SmallDenseSet<int> &IndexSet,
                           SmallVectorImpl<int> &DimensionSizes,
                           SmallVectorImpl<int> &Dimensions) {
  isl::id Id = AccMap.get_tuple_id(isl::dim::out);
  const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(Id);
  assert(SAI && "AccMap should represent memory access");

  // Fix values of output dimensions with respect to their positions.
  // In the case of the tensor contraction, values of output dimensions are
  // fixed and form a permutation of a subset of values of input dimensions.
  //
  // For example, in the case of Stmt[i][j][k] -> A[k][i], which represents
  // the operand of the tensor contraction, we get the following map by fixing
  // the output dimensions Stmt[1][j][0] -> A[0][1].
  //
  // We store the permutation of the subset of the input dimensions {2, 0} into
  // @p Dimensions.
  //
  // The obtained permutation and the isCorrectAccessMap function are used to
  // check whether the access relation @p AccMap represents the tensor
  // contraction operand. For example, in the case of
  // Stmt[i][j][k] -> A[i-1][j+1], we get Stmt[1][0][k] -> A[0][1] and,
  // consequently, {1, 0}, which is rejected by isCorrectAccessMap,
  // since it corresponds to Stmt[i][j][k] -> A[j][i].
  isl::map CheckMap = isl::manage(AccMap.copy());
  unsigned OutDimNum = unsignedFromIslSize(CheckMap.dim(isl::dim::out));
  for (unsigned i = 0; i < OutDimNum; i++)
    CheckMap = CheckMap.fix_si(isl::dim::out, i, i);

  // Try to obtain the permutation and sizes of corresponding input dimensions.
  // -1 marks an output dimension whose input dimension was not identified.
  Dimensions.assign(OutDimNum, -1);
  for (unsigned i : rangeIslSize(0, CheckMap.dim(isl::dim::in))) {
    isl::val Val = getConstant(CheckMap, isl::dim::in, i);
    if (!Val.is_int())
      continue;
    int OutPos = -1;
    llvm::APInt ValAPInt = APIntFromVal(Val);
    if (ValAPInt.isSignedIntN(32))
      OutPos = ValAPInt.getSExtValue();
    // Reject out-of-range positions and input dimensions already claimed by
    // another output dimension.
    if ((OutPos < 0) || (OutPos >= static_cast<int>(OutDimNum)) ||
        IndexSet.count(i))
      return false;
    IndexSet.insert(i);
    Dimensions[OutPos] = i;
    // Record the dimension size the first time this input dimension is seen.
    if (DimensionSizes[i] <= 0)
      DimensionSizes[i] = getDimSize(SAI, OutPos);
  }

  return isCorrectAccessMap(Domain, AccMap, Dimensions);
}
1240
1241/// Find the intersection of two sets.
1242///
1243/// Find the intersection of the set @p A and the set @p B.
1244///
1245/// @param A, B Sets to intersect.
1246/// @return The set intersection.
1247static SmallDenseSet<int> intersect(const SmallDenseSet<int> &A,
1248 const SmallDenseSet<int> &B) {
1249 SmallDenseSet<int> Intersection = A;
1250 set_intersect(Intersection, B);
1251 return Intersection;
1252}
1253
1254/// Check whether the set is a superset.
1255///
1256/// Check that the set @p A is a superset of @p B.
1257///
1258/// @param A, B Sets to be checked.
1259/// @return True if the set A is a superset of B.
1260static bool isSuperset(const SmallDenseSet<int> &A,
1261 const SmallDenseSet<int> &B) {
1262 return intersect(A, B).size() == B.size();
1263}
1264
1265/// Find the union of two sets.
1266///
1267/// Find the union of the set @p A and the set @p B.
1268///
1269/// @param A, B Sets to unite.
1270/// @return The set union.
1271static SmallDenseSet<int> unite(const SmallDenseSet<int> &A,
1272 const SmallDenseSet<int> &B) {
1273 SmallDenseSet<int> Union = A;
1274 set_union(Union, B);
1275 return Union;
1276}
1277
1278/// Determine the access that writes to the tensor, which contains
1279/// the result of the tensor contraction.
1280///
1281/// @param Domain The domain of the statement.
1282/// @param Stmt The statement, which writes to memory.
1283/// @param TCI The information about the tensor contraction.
1284/// @param IandJIndexSet The set, which contains free indexes of tensors.
1285/// @return The determined MemoryAccess, or nullptr if there is no necessary
1286/// access within the SCoP.
1287static MemoryAccess *getWriteAccess(isl::set Domain, ScopStmt *Stmt,
1288 TCInfoTy &TCI,
1289 SmallDenseSet<int> &IandJIndexSet) {
1290 TCI.WriteToC = nullptr;
1291 SmallVector<MemoryAccess *, 32> Accesses = getAccessesInOrder(*Stmt);
1292 for (MemoryAccess *MemA : reverse(Accesses)) {
1293 // A TC-like does not contain write scalar memory accesses
1294 if (!MemA->isLatestArrayKind())
1295 return nullptr;
1296 // The last memory access should be a write memory access.
1297 if (!MemA->isWrite())
1298 return nullptr;
1299
1300 isl::map AccMap = MemA->getLatestAccessRelation();
1301 if (!isTCOperandAcc(Domain, AccMap, IandJIndexSet, TCI.DimensionSizes,
1302 TCI.CDimensions))
1303 return nullptr;
1304
1305 return MemA;
1306 }
1307 return nullptr;
1308}
1309
1310/// Determine an access, which reads elements of an operand of the tensor
1311/// contraction
1312///
1313/// @param MemAccessPtr The access, which reads elements of the tensor.
1314/// @param IndexSet The set, which contains indexes of the tensors.
1315/// @param IandJIndexSet The set, which contains free indexes of tensors.
1316/// @param Dimensions The permutation of the subset of the input dimensions.
1317/// @param TCI The information about the tensor contraction.
1318/// @return True if the memory access @p MemAccessPtr corresponds
1319/// to the tensor contraction.
static bool setReadAccess(MemoryAccess *MemAccessPtr,
                          const SmallDenseSet<int> &IndexSet,
                          const SmallDenseSet<int> &IandJIndexSet,
                          ArrayRef<int> Dimensions, TCInfoTy &TCI) {
  // The first suitable read access becomes operand A, the second one
  // operand B; any further candidate is rejected.
  if (!TCI.A) {
    // Probably IndexSet is a union of I and P sets.
    if (!isSuperset(IndexSet, TCI.P))
      return false;

    // Obtain the set I.
    TCI.I = set_difference(IndexSet, TCI.P);
    if (!isSuperset(IandJIndexSet, TCI.I))
      return false;

    // Obtain the set J.
    TCI.J = set_difference(IandJIndexSet, TCI.I);

    // Set the first operand of the tensor contraction.
    TCI.A = MemAccessPtr;
    // Replace the full contents of ADimensions with the permutation found
    // for this access.
    llvm::replace(TCI.ADimensions, TCI.ADimensions.begin(),
                  TCI.ADimensions.end(), Dimensions.begin(), Dimensions.end());
    return true;
  }

  if (!TCI.B) {
    // IndexSet should be a union of J and P sets.
    if (unite(TCI.P, TCI.J) != IndexSet)
      return false;

    // Set the second operand of the tensor contraction.
    TCI.B = MemAccessPtr;
    // Replace the full contents of BDimensions with the permutation found
    // for this access.
    llvm::replace(TCI.BDimensions, TCI.BDimensions.begin(),
                  TCI.BDimensions.end(), Dimensions.begin(), Dimensions.end());
    return true;
  }

  return false;
}
1358
1359/// Check that all memory accesses of the statement, except from the last
1360/// one, are read memory accesses, which read elements of operands of the tensor
1361/// contraction and its result.
1362///
1363/// @param Domain The domain of the statement.
1364/// @param Stmt The statement, which writes to memory.
1365/// @param TCI The information about the tensor contraction.
1366/// @param IandJIndexSet The set, which contains free indexes of tensors.
1367/// @return True if all read memory accesses of the statement @p Stmt correspond
1368/// to the tensor contraction.
1369static bool setReadAccesses(isl::set Domain, ScopStmt *Stmt, TCInfoTy &TCI,
1370 SmallDenseSet<int> &IandJIndexSet) {
1371 TCI.A = nullptr;
1372 TCI.B = nullptr;
1373 TCI.ReadFromC = nullptr;
1374 SmallVector<MemoryAccess *, 32> Accesses = getAccessesInOrder(*Stmt);
1375 for (auto *MemA = Accesses.begin(); *MemA != TCI.WriteToC; MemA++) {
1376 MemoryAccess *MemAccessPtr = *MemA;
1377
1378 // All memory accesses, except from the last one, should be read memory
1379 // accesses.
1380 if (MemAccessPtr->isWrite())
1381 return false;
1382
1383 isl::map AccMap = MemAccessPtr->getLatestAccessRelation();
1384
1385 if (!MemAccessPtr->isLatestArrayKind()) {
1386 // Check whether the scalar read memory access is not partial.
1387 if (!Domain.is_subset(AccMap.domain()))
1388 return false;
1389 continue;
1390 return false;
1391 }
1392
1393 // There is only one memory access, which reads elements of the result of
1394 // the tensor contraction.
1395 if (AccMap.is_equal(TCI.WriteToC->getLatestAccessRelation())) {
1396 if (TCI.ReadFromC)
1397 return false;
1398 TCI.ReadFromC = MemAccessPtr;
1399 continue;
1400 }
1401
1402 SmallVector<int> Dimensions;
1403 SmallDenseSet<int> IndexSet;
1404 if (!isTCOperandAcc(Domain, AccMap, IndexSet, TCI.DimensionSizes,
1405 Dimensions))
1406 return false;
1407
1408 if (!setReadAccess(MemAccessPtr, IndexSet, IandJIndexSet, Dimensions, TCI))
1409 return false;
1410 }
1411
1412 // Check that there are read memory accesses, which read elements of operands
1413 // of the tensor contraction and its result.
1414 return TCI.ReadFromC && TCI.A && TCI.B;
1415}
1416
1417/// Check accesses to operands of the tensor contraction.
1418///
1419/// Check that accesses of the SCoP statement, which corresponds to
1420/// the partial schedule @p PartialSchedule, represent accesses
1421/// to the non-scalar operands of the tensor contraction.
1422///
1423/// @param Domain The domain of the SCoP statement.
1424/// @param PartialSchedule The partial schedule of the SCoP statement.
1425/// @param TCI Parameters of the tensor contraction operands.
1426/// @return True if the corresponding SCoP statement
1427/// represents tensor contraction and false,
1428/// otherwise.
1429static bool containsOnlyTCAcc(isl::set Domain, isl::map PartialSchedule,
1430 TCInfoTy &TCI) {
1431 isl::id InputDimsId = PartialSchedule.get_tuple_id(isl::dim::in);
1432 ScopStmt *Stmt = static_cast<ScopStmt *>(InputDimsId.get_user());
1433
1434 // In region statements, the order of memory accesses execution is not
1435 // predictable at compile-time.
1436 if ((Stmt->size() <= 1) || Stmt->isRegionStmt())
1437 return false;
1438
1439 unsigned DimNum = unsignedFromIslSize(PartialSchedule.dim(isl::dim::in));
1440 TCI.DimensionSizes.resize(DimNum);
1441 SmallDenseSet<int> IandJIndexSet;
1442
1443 TCI.WriteToC = getWriteAccess(Domain, Stmt, TCI, IandJIndexSet);
1444 if (!TCI.WriteToC)
1445 return false;
1446
1447 if (intersect(IandJIndexSet, TCI.P).size() != 0)
1448 return false;
1449
1450 if (!setReadAccesses(Domain, Stmt, TCI, IandJIndexSet))
1451 return false;
1452
1453 return true;
1454}
1455
1456/// Check that dependency corresponds to the tensor contraction carried over
1457/// loop dimension @p Dim.
1458///
1459/// Check that the dependency has the form
1460/// S(..., ki, max(k(i + 1)), ..., max(kn), ...) ->
1461/// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP
1462/// statement. For this purpose, we analyze the set @p DepDelta, which
1463/// represents the differences between image elements and domain elements of
1464/// the corresponding map.
1465///
1466/// @param DepDelta The set contains the differences between image elements
1467/// and corresponding domain elements of the map, which
1468/// represents the dependency.
1469/// @param Dim The position of the index ki.
1470/// @param BoundDeltas In the case of indexes of ki, the difference between
1471/// image elements and corresponding domain elements
1472/// corresponds to the difference between lexicographic
1473/// minimum and lexicographic maximum of the corresponding
1474/// dimension of the domain of the statement.
1475/// @param IndexSet Obtained indexes ki, which describe the dependency.
1476/// @return True if dependencies correspond to the tensor contraction
1477/// and false, otherwise.
static bool isReductionCarriedOverDim(isl::set DepDelta, unsigned Dim,
                                      isl::pw_multi_aff BoundDeltas,
                                      const SmallDenseSet<int> &IndexSet) {
  // Superset = { [0, …, 0, 1, *, …, *] }: zero in every position before
  // @p Dim, one at @p Dim, unconstrained afterwards.
  isl::space Space = DepDelta.get_space();
  isl::set Superset = isl::set::universe(Space);
  for (unsigned i = 0; i < Dim; i += 1)
    Superset = Superset.fix_si(isl::dim::set, i, 0);
  Superset = Superset.fix_si(isl::dim::set, Dim, 1);

  // Check that the difference between the image element and the domain element
  // is equal to one in the case of the index ki. Image elements and
  // corresponding domain elements should be equal in the case of positions,
  // which are lower than the specified position.
  if (!DepDelta.is_subset(Superset))
    return false;

  // Compute a set, which is used to analyze how values of
  // the domain are related to the map that describes the dependency.
  isl_pw_multi_aff *DepDeltaPW = isl_pw_multi_aff_from_set(DepDelta.copy());
  BoundDeltas = BoundDeltas.add(isl::manage(DepDeltaPW));
  isl_set *ComplementRawSet = isl_set_from_pw_multi_aff(BoundDeltas.release());
  isl::set Complement = isl::manage(ComplementRawSet);

  // Inspect every dimension after @p Dim.
  for (unsigned i : rangeIslSize(Dim + 1, DepDelta.dim(isl::dim::set))) {
    if (!IndexSet.count(i)) {
      // Check the difference between the image element and the domain element
      // in the case of indexes, which do not describe the dependency.
      if (DepDelta.plain_get_val_if_fixed(isl::dim::set, i).is_zero())
        continue;
      return false;
    }

    // In the case of other indexes, which describe the dependency,
    // the difference between the image element and the domain element
    // should be equal to the difference between lexicographic minimum and
    // lexicographic maximum of the domain of the statement.
    if (!Complement.plain_get_val_if_fixed(isl::dim::set, i).is_zero())
      return false;
  }

  return true;
}
1520
1521/// Check whether dependencies are over the complete domain.
1522///
1523/// In the case of the tensor contraction RAW, WAW, WAR dependencies
1524/// have the form
1525/// S(..., ki, max(k(i + 1)), ..., max(kn), ...) ->
1526/// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP
1527/// statement. Consequently, the domain of the dependencies
1528/// can be described as
1529/// Domain / Domain ∩ S(…, max(kn),…) ∩ S(…, max(k(i + 1)),…),
1530/// where Domain is the domain of the statement S.
1531///
1532/// For example, in the case of the following tensor contraction,
1533/// corresponding domains will have the following form.
1534///
1535/// An example of the tensor contraction:
1536/// for (i = 0; i < 1024; i++)
1537/// for (j = 0; j < 1024; j++)
1538/// for (l = 0; l < 64; ++l)
1539/// for (w = 0; w < 64; ++w)
1540/// C[i][j] += A[i][l][w] * B[w][j][l];
1541///
1542/// The domain of the statement:
1543/// { S[i0, i1, i2, i3] : i0 >= 0 and i0 <= 1023 and
1544/// i1 >= 0 and i1 <= 1023 and
1545/// i2 >= 0 and i2 <= 63 and
1546/// i3 >= 0 and i3 <= 63 }
1547///
1548/// The domain of the dependencies:
1549/// { S[i0, i1, i2, i3] : (i0 >= 0 and i0 <= 1023 and
1550/// i1 >= 0 and i1 <= 1023 and
1551/// i2 >= 0 and i2 <= 63 and
1552/// i3 >= 0 and i3 <= 62) or
1553/// (i3 = 63 and i0 >= 0 and i0 <= 1023 and
1554/// i1 >= 0 and i1 <= 1023 and
1555/// i2 >= 0 and i2 <= 62) }
1556///
1557/// @param Domain The domain of the statement.
1558/// @param DepsForStmt RAW and RED dependencies for the statement.
1559/// @param UpperBound The lexicographic maximum of the elements in
1560/// the @p Domain.
1561/// @param IndexSet Obtained indexes ki, which describe the dependencies.
1562/// @return True if dependencies are over the complete domain
1563/// and false, otherwise.
1564static bool areDepsOverCompleteDomain(isl::set Domain, isl::map DepsForStmt,
1565 isl::pw_multi_aff UpperBound,
1566 SmallDenseSet<int> &IndexSet) {
 // Materialize the lexicographic maximum of the domain as a set so that the
 // fixed value of each individual dimension can be queried below.
1567 isl_set *UpperBoundRawSet = isl_set_from_pw_multi_aff(UpperBound.copy());
1568 isl::set UpperBoundSet = isl::manage(UpperBoundRawSet);
1569
 // Restrict the domain to the iterations where every contraction index ki
 // from IndexSet takes its maximal value. Per the comment above, these are
 // the iterations that are excluded from the domain of the dependencies.
1570 isl::set DomainRed = isl::manage(Domain.copy());
1571 for (const auto It : IndexSet) {
1572 isl::val FixedVal = UpperBoundSet.plain_get_val_if_fixed(isl::dim::set, It);
 // A NaN result means dimension It has no single fixed maximum over the
 // domain, so the reduced domain cannot be constructed.
1573 if (FixedVal.is_nan())
1574 return false;
1575 DomainRed = isl::manage(
1576 isl_set_fix_val(DomainRed.copy(), isl_dim_set, It, FixedVal.release()));
1577 }
 // Dependencies cover the complete domain iff their source iterations are
 // exactly Domain minus the maximal-ki iterations computed above.
1578 return DepsForStmt.domain().intersect(Domain).is_equal(
1579 Domain.subtract(DomainRed));
1580}
1581
1582/// Check that dependencies correspond to the tensor contraction.
1583///
1584/// Check that there are only true dependencies of the form
1585/// S(..., ki, max(k(i + 1)), ..., max(kn), ...) ->
1586/// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP
1587/// statement represented by @p Schedule. Such dependencies are produced by
1588/// the tensor contraction. Obtained indexes ki are stored into @p IndexSet.
1589///
1590/// The form of anti and output dependencies is specified implicitly by
1591/// the form the SCoP statement, which is checked by subsequent analysis.
1592///
1593/// @param Schedule The schedule of the SCoP statement.
1594/// @param D The SCoP dependencies.
1595/// @param Domain The domain of the statement.
1596/// @param IndexSet Obtained indexes ki, which describe the dependencies.
1597/// @return True if dependencies correspond to the tensor contraction
1598/// and false, otherwise.
1599static bool containsOnlyTcDeps(isl::map Schedule, const Dependences *D,
1600 SmallDenseSet<int> &IndexSet, isl::set Domain) {
 // Bound the amount of isl computation spent in this analysis; operations
 // exceeding OptComputeOut steps yield null/invalid results instead of
 // running unboundedly.
1601 IslMaxOperationsGuard MaxOpGuard(Schedule.ctx().get(), OptComputeOut);
1602
 // NOTE(review): the initializer of Dep (original line 1604) is missing from
 // this listing — presumably a D->getDependences(...) call selecting the RAW
 // and RED dependence kinds; confirm against the upstream source.
1603 isl::union_map Dep =
1605
 // Extract the self-dependencies of the single statement described by the
 // schedule and compute the per-dimension differences (deltas) between
 // dependence targets and sources.
1606 isl::space DomainSpace = Schedule.get_space().domain();
1607 isl::space Space = DomainSpace.map_from_domain_and_range(DomainSpace);
1608 isl::map DepsForStmt = Dep.extract_map(Space);
1609 isl::set DepDeltas = DepsForStmt.deltas();
1610 isl::size DeltasDimNum = DepDeltas.dim(isl::dim::set);
 // BoundDeltas = lexmax(Domain) - lexmin(Domain): the extent of each
 // dimension, used to recognize the min/max wrap-around pattern of the
 // contraction dependencies.
1611 isl::pw_multi_aff LowerBound = Domain.lexmin_pw_multi_aff();
1612 isl::pw_multi_aff UpperBound = Domain.lexmax_pw_multi_aff();
1613 isl::pw_multi_aff BoundDeltas = UpperBound.sub(LowerBound);
1614
 // Walk the dimensions from innermost to outermost, peeling off the deltas
 // carried by each candidate contraction index ki.
1615 for (int i : reverse(rangeIslSize(0, DeltasDimNum))) {
1616 // In the case of the tensor contraction, the difference between image
1617 // elements and domain elements lies on a hyperplane where a dimension
1618 // has the fixed value one.
1619 isl::set Intersection = DepDeltas.fix_si(isl::dim::set, i, 1);
1620 if (Intersection.is_empty())
1621 continue;
1622
1623 if (!isReductionCarriedOverDim(Intersection, i, BoundDeltas, IndexSet))
1624 return false;
1625
1626 IndexSet.insert(i);
1627 DepDeltas = DepDeltas.subtract(Intersection);
1628 }
1629
1630 // In the case of the tensor contraction, all dependencies should have
1631 // the previously described form.
 // A zero-dimensional delta set also indicates the isl computation was cut
 // short (or there is nothing to analyze), so reject it as well.
1632 if ((unsignedFromIslSize(DeltasDimNum) == 0) || !DepDeltas.is_empty())
1633 return false;
1634
1635 return areDepsOverCompleteDomain(Domain, DepsForStmt, UpperBound, IndexSet);
1636}
1637
1638/// Check if the SCoP statement could probably be optimized with analytical
1639/// modeling.
1640///
1641/// containsTCInfoTy tries to determine whether the following conditions
1642/// are true:
1643///
1644/// 1. The last memory access modeling an array, MA1, represents writing to
1645/// memory and has the form S(..., I, ..., J, ...) -> M(shuffle(I, J)),
1646/// where S is the SCoP statement under consideration and shuffle(I, J)
1647/// is a permutation of indexes of sets I and J.
1648/// 2. There are only true dependencies of the form
1649/// S(..., ki, max(k(i + 1)), ..., max(kn), ...) ->
1650/// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP
1651/// statement represented by @p Schedule and ki are indexes of the set P.
1652/// 3. SCoP contains an arbitrary number of reads from constants and only three
1653/// access relations, MA2, MA3, and MA4 that represent reading from memory
1654/// and have the form
1655/// S(..., I, ..., P, ...) -> M(shuffle(I, P)),
1656/// S(..., P, ..., J, ...) -> M(shuffle(J, P)),
1657/// S(...) -> M(shuffle(I, J)), respectively.
1658///
1659/// @param PartialSchedule The PartialSchedule that contains a SCoP statement
1660/// to check.
1661/// @param D The SCoP dependencies.
1662/// @param TCI Parameters of the tensor contraction operands.
1663/// @param Domain The domain of the statement.
1664/// @return True if dependencies and memory accesses correspond to the tensor
1665/// contraction and false, otherwise.
1666static bool containsTCInfoTy(isl::map PartialSchedule, const Dependences *D,
1667 TCInfoTy &TCI, isl::set Domain) {
1668 if (!containsOnlyTcDeps(PartialSchedule, D, TCI.P, Domain))
1669 return false;
1670
1671 // TODO: handle cases of scalar multiplication if needed.
1672 if (TCI.P.size() == 0)
1673 return false;
1674
1675 if (!containsOnlyTCAcc(Domain, PartialSchedule, TCI))
1676 return false;
1677
1678 // TODO: handle cases of GEMV if needed.
1679 if ((TCI.I.size() == 0) || (TCI.J.size() == 0))
1680 return false;
1681
1682 return true;
1683}
1684
1685/// Check if this node contains a partial schedule that could
1686/// probably be optimized with analytical modeling.
1687///
1688/// isTCPattern is used to determine whether the SCoP represents a TC-like
1689/// kernel [1], which is a perfectly nested set of loops, with a data usage
1690/// pattern that is similar to that produced by the tensor contraction.
1691///
1692/// A TC-like kernel can be defined as follows:
1693///
1694/// 1. It satisfies the requirements of the polyhedral model.
1695/// 2. Without loss of generality, it contains three nonempty bundles of
1696/// one-dimensional for-loops with induction variables that are grouped into
1697/// bundles I = i0...i(r-1), J = j0..j(s-1), and P = p0...p(t-1), and they
1698/// are incremented by one.
1699/// 3. The innermost loop body can be represented as a statement of the form
1700/// C(shuffle(I, J)) = E(A(shuffle(I, P)), B(shuffle(P, J)),
1701/// C(shuffle(I, J))), where A(shuffle(I, P)), B(shuffle(P, J)),
1702/// C(shuffle(I, J)) are accesses to tensors A, B, C, respectively,
1703/// shuffle(I, J), shuffle(I, P), and shuffle(P, J) are permutations of the
1704/// enclosed indices, and E is an expression that contains reads from
1705/// the tensors A, B, C, and an arbitrary number of reads from constants
1706/// with respect to bundles I, J, and P.
1707///
1708/// TC can be considered as a particular case of a TC-like kernel.
1709///
1710/// The order of loops with indexes from P should be preserved. Otherwise,
1711/// isTCPattern should check if a commutative operation is used.
1712///
1713/// isTCPattern performs the following steps to check whether the SCoP
1714/// corresponds to a definition of a TC-like kernel:
1715///
1716/// 1. Checks that the node is the innermost band node.
1717/// 2. Checks that the partial schedule contains only one statement.
1718/// 3. Check that all ancestors of the node contain all band nodes for
1719/// the statement and only mark nodes interleave such band nodes. This
1720/// corresponds to a straightforward implementation of TC.
1721/// 4. Analyses the dependencies to determine contraction dimensions.
1722/// 5. Check that the last memory access modeling an array, represents writing
1723/// to the result of the TC-like kernel.
1724/// 6. Check that SCoP contains only three access relations that represent
1725/// reading of the operands of the TC-like kernel and an arbitrary number of
1726/// reads from constants.
1727///
1728/// [1] - Gareev R., Grosser T., Kruse M. High-Performance Generalized Tensor
1729/// Operations: A Compiler-Oriented Approach // ACM Transactions
1730/// Architecture and Code Optimization (TACO). 2018.
1731/// Vol. 15, no. 3. P. 34:1–34:27. DOI: 10.1145/3235029.
1732///
1733/// If this is the case, we could logically represent tensors as matrices and
1734/// apply algorithms, which are used to get close-to-peak performance of
1735/// matrix multiplications in manually tuned BLAS libraries (e.g., BLIS).
1736///
1737/// @param Node The node to check.
1738/// @param D The SCoP dependencies.
1739/// @param TCI Parameters of the tensor contraction operands.
1740static bool isTCPattern(isl::schedule_node Node, const Dependences *D,
1741 TCInfoTy &TCI) {
 // Descend to the child of the node to read the prefix schedule and the
 // statements it covers, then return to the original node.
1742 Node = Node.child(0);
1743 isl::union_map PartialSchedule = Node.get_prefix_schedule_union_map();
1744 isl::union_set Domain = Node.domain();
1745 Node = Node.parent();
1746
1747 // The partial schedule should contain only one statement.
1748 // TODO: This constraint should not be intrinsic to the algorithm.
1749 if (isl_union_set_n_set(Domain.get()) != 1)
1750 return false;
1751
 // NOTE(review): original line 1752 is missing from this listing. Judging
 // from the uses below, it presumably initialized NodeType via
 // isl_schedule_node_get_type(Node.get()) — confirm against the upstream
 // source.
1753
1754 // Check that all ancestors of the node contain all band nodes for
1755 // the statement, which represents the TC-like kernel, and only mark nodes
1756 // interleave such band nodes. This corresponds to a straightforward
1757 // implementation of TC with/without DeLICM applied.
1758 //
1759 // For example, this covers the matrix multiplication pattern after a full
1760 // run of -polly-optree and -polly-delicm, where the write access is not
1761 // through the original memory access, but through a PHI node that was
1762 // delicmed. Subsequently, such band nodes will be replaced by a single band
1763 // node.
1764 //
1765 // The corresponding schedule can be the following, where Stmt_for_body8
1766 // contains the matrix multiplication:
1767 //
1768 // domain: "{ Stmt_for_body8[i0, i1, i2] : 0 <= i0 <= 1599 and
1769 // 0 <= i1 <= 1799 and
1770 // 0 <= i2 <= 2199;
1771 // Stmt_for_body3[i0, i1] : 0 <= i0 <= 1599 and
1772 // 0 <= i1 <= 1799;
1773 // Stmt_for_body3_last[i0, i1] : 0 <= i0 <= 1599 and
1774 // 0 <= i1 <= 1799 }"
1775 // child:
1776 // sequence:
1777 // - filter: "{ Stmt_for_body3[i0, i1] }"
1778 // child:
1779 // schedule: "[{ Stmt_for_body3[i0, i1] -> [(i0)] },
1780 // { Stmt_for_body3[i0, i1] -> [(i1)] }]"
1781 // permutable: 1
1782 // coincident: [ 1, 1 ]
1783 // - filter: "{ Stmt_for_body3_last[i0, i1] }"
1784 // child:
1785 // schedule: "[{ Stmt_for_body3_last[i0, i1] -> [(i0)] },
1786 // { Stmt_for_body3_last[i0, i1] -> [(i1)] }]"
1787 // permutable: 1
1788 // coincident: [ 1, 1 ]
1789 // - filter: "{ Stmt_for_body8[i0, i1, i2] }"
1790 // child:
1791 // schedule: "[{ Stmt_for_body8[i0, i1, i2] -> [(i0)] },
1792 // { Stmt_for_body8[i0, i1, i2] -> [(i1)] },
1793 // { Stmt_for_body8[i0, i1, i2] -> [(i2)] }]"
1794 // permutable: 1
1795 // coincident: [ 1, 1, 0 ]
1796 //
 // Ascend through the ancestors: only band and mark nodes are allowed on the
 // way up; a filter node is accepted only as a direct child of a sequence
 // that itself hangs directly under the domain node.
1797 while (NodeType != isl_schedule_node_domain) {
1798 if (NodeType == isl_schedule_node_filter) {
1799 if (!Node.parent().isa<isl::schedule_node_sequence>() ||
1800 !Node.parent().parent().isa<isl::schedule_node_domain>())
1801 return false;
1802 break;
1803 }
1804
1805 if ((NodeType != isl_schedule_node_band) &&
1806 (NodeType != isl_schedule_node_mark))
1807 return false;
1808
1809 Node = Node.parent();
1810 NodeType = isl_schedule_node_get_type(Node.get());
1811 }
1812
 // The union map covers a single statement (checked above), so it can be
 // flattened to a plain map for the dependency and access analysis.
1813 isl::map PartialScheduleMap = isl::map::from_union_map(PartialSchedule);
1814 if (containsTCInfoTy(PartialScheduleMap, D, TCI, isl::set(Domain)))
1815 return true;
1816
1817 return false;
1818}
1819
1820} // namespace
1821
1824 const llvm::TargetTransformInfo *TTI,
1825 const Dependences *D) {
1826 TCInfoTy TCI;
1827 if (PMBasedTCOpts && isTCPattern(Node, D, TCI))
1828 POLLY_DEBUG(dbgs() << "The tensor contraction pattern was detected\n");
1829 MatMulInfoTy MMI;
1830 if (PMBasedMMMOpts && isMatrMultPattern(Node, D, MMI)) {
1831 POLLY_DEBUG(dbgs() << "The matrix multiplication pattern was detected\n");
1832 return optimizeMatMulPattern(Node, TTI, MMI);
1833 }
1834 return {};
1835}
static cl::opt< int > OptComputeOut("polly-dependences-computeout", cl::desc("Bound the dependence analysis by a maximal amount of " "computational steps (0 means no bound)"), cl::Hidden, cl::init(500000), cl::cat(PollyCategory))
unsigned unsignedFromIslSize(const isl::size &Size)
Check that Size is valid (only on debug builds) and cast it to unsigned.
Definition ISLTools.h:40
static cl::opt< int > FirstCacheLevelDefaultSize("polly-target-1st-cache-level-default-size", cl::desc("The default size of the first cache level specified in bytes" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(32768), cl::cat(PollyCategory))
static cl::opt< int > SecondCacheLevelDefaultAssociativity("polly-target-2nd-cache-level-default-associativity", cl::desc("The default associativity of the second cache level" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(8), cl::cat(PollyCategory))
static cl::opt< bool > PMBasedMMMOpts("polly-matmul-opt", cl::desc("Perform optimizations of matrix multiplications " "based on pattern matching"), cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory))
static cl::opt< int > FirstCacheLevelAssociativity("polly-target-1st-cache-level-associativity", cl::desc("The associativity of the first cache level."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory))
static cl::opt< int > SecondCacheLevelDefaultSize("polly-target-2nd-cache-level-default-size", cl::desc("The default size of the second cache level specified in bytes" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(262144), cl::cat(PollyCategory))
static cl::opt< int > PollyPatternMatchingNcQuotient("polly-pattern-matching-nc-quotient", cl::desc("Quotient that is obtained by dividing Nc, the parameter of the" "macro-kernel, by Nr, the parameter of the micro-kernel"), cl::Hidden, cl::init(256), cl::cat(PollyCategory))
static cl::opt< bool > PMBasedTCOpts("polly-tc-opt", cl::desc("Perform optimizations of tensor contractions based " "on pattern matching"), cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory))
static cl::opt< int > FirstCacheLevelSize("polly-target-1st-cache-level-size", cl::desc("The size of the first cache level specified in bytes."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory))
static cl::opt< int > ThroughputVectorFma("polly-target-throughput-vector-fma", cl::desc("A throughput of the processor floating-point arithmetic units " "expressed in the number of vector fused multiply-add " "instructions per clock cycle."), cl::Hidden, cl::init(1), cl::cat(PollyCategory))
static cl::opt< int > SecondCacheLevelSize("polly-target-2nd-cache-level-size", cl::desc("The size of the second level specified in bytes."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory))
static cl::opt< int > FirstCacheLevelDefaultAssociativity("polly-target-1st-cache-level-default-associativity", cl::desc("The default associativity of the first cache level" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(8), cl::cat(PollyCategory))
static cl::opt< int > SecondCacheLevelAssociativity("polly-target-2nd-cache-level-associativity", cl::desc("The associativity of the second cache level."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory))
static cl::opt< int > VectorRegisterBitwidth("polly-target-vector-register-bitwidth", cl::desc("The size in bits of a vector register (if not set, this " "information is taken from LLVM's target information."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory))
static cl::opt< int > LatencyVectorFma("polly-target-latency-vector-fma", cl::desc("The minimal number of cycles between issuing two " "dependent consecutive vector fused multiply-add " "instructions."), cl::Hidden, cl::init(8), cl::cat(PollyCategory))
static cl::opt< int > OptComputeOut("polly-tc-dependences-computeout", cl::desc("Bound the dependence analysis by a maximal amount of " "computational steps (0 means no bound)"), cl::Hidden, cl::init(500000), cl::ZeroOrMore, cl::cat(PollyCategory))
llvm::cl::OptionCategory PollyCategory
#define POLLY_DEBUG(X)
Definition PollyDebug.h:23
__isl_give isl_set * isl_set_from_pw_multi_aff(__isl_take isl_pw_multi_aff *pma)
__isl_give isl_pw_multi_aff * isl_pw_multi_aff_from_set(__isl_take isl_set *set)
Definition isl_aff.c:5608
struct isl_pw_multi_aff isl_pw_multi_aff
Definition aff_type.h:33
isl_ctx * get()
bool is_null() const
void * get_user() const
static isl::id alloc(isl::ctx ctx, const std::string &name, void *user)
isl::map equate(isl::dim type1, int pos1, isl::dim type2, int pos2) const
static isl::map universe(isl::space space)
isl::id get_tuple_id(isl::dim type) const
isl::map reverse() const
isl::set deltas() const
class size range_tuple_dim() const
isl::map set_tuple_id(isl::dim type, isl::id id) const
isl::map fix_si(isl::dim type, unsigned int pos, int value) const
isl::set range() const
isl::set wrap() const
isl::map intersect_range(isl::set set) const
isl::ctx ctx() const
isl::map apply_range(isl::map map2) const
static isl::map from_union_map(isl::union_map umap)
boolean is_equal(const isl::map &map2) const
isl::map apply_domain(isl::map map2) const
isl::map range_product(isl::map map2) const
class size dim(isl::dim type) const
isl::space get_space() const
isl::set domain() const
__isl_keep isl_map * get() const
isl::map move_dims(isl::dim dst_type, unsigned int dst_pos, isl::dim src_type, unsigned int src_pos, unsigned int n) const
isl::map intersect_domain(isl::set set) const
isl::map project_out(isl::dim type, unsigned int first, unsigned int n) const
boolean has_tuple_id(isl::dim type) const
__isl_give isl_map * copy() const &
bool is_null() const
__isl_give isl_pw_multi_aff * copy() const &
isl::multi_pw_aff add(const isl::multi_pw_aff &multi2) const
__isl_give isl_pw_multi_aff * release()
isl::multi_pw_aff sub(const isl::multi_pw_aff &multi2) const
isl::union_set domain() const
isl::union_set get_universe_domain() const
class size get_schedule_depth() const
isl::schedule_node insert_mark(isl::id mark) const
isl::schedule_node child(int pos) const
__isl_give isl_schedule_node * release()
isl::schedule_node insert_partial_schedule(isl::multi_union_pw_aff schedule) const
isl::union_map get_prefix_schedule_relation() const
__isl_give isl_schedule_node * copy() const &
isl::union_map get_prefix_schedule_union_map() const
isl::schedule_node graft_before(isl::schedule_node graft) const
static isl::schedule_node from_extension(isl::union_map extension)
isl::schedule_node parent() const
__isl_keep isl_schedule_node * get() const
isl::set project_out(isl::dim type, unsigned int first, unsigned int n) const
isl::set intersect(isl::set set2) const
isl::set subtract(isl::set set2) const
static isl::set universe(isl::space space)
isl::set fix_si(isl::dim type, unsigned int pos, int value) const
__isl_give isl_set * copy() const &
boolean is_subset(const isl::set &set2) const
class size tuple_dim() const
isl::space get_space() const
boolean is_empty() const
class size dim(isl::dim type) const
isl::set add_dims(isl::dim type, unsigned int n) const
isl::val plain_get_val_if_fixed(isl::dim type, unsigned int pos) const
boolean is_equal(const isl::set &set2) const
isl_size release()
isl::space map_from_domain_and_range(isl::space range) const
isl::space domain() const
class size dim(isl::dim type) const
isl::map extract_map(isl::space space) const
isl::union_map unite(isl::union_map umap2) const
isl::union_set unite(isl::union_set uset2) const
__isl_give isl_val * release()
boolean is_zero() const
boolean is_int() const
boolean is_nan() const
The accumulated dependence information for a SCoP.
isl::union_map getDependences(int Kinds) const
Get the dependences of type Kinds.
isl::map getLatestAccessRelation() const
Return the newest access relation of this access.
Definition ScopInfo.h:784
bool isLatestArrayKind() const
Whether storage memory is either an custom .s2a/.phiops alloca (false) or an existing pointer into an...
Definition ScopInfo.h:945
bool isWrite() const
Is this a write memory access?
Definition ScopInfo.h:764
bool isRead() const
Is this a read memory access?
Definition ScopInfo.h:755
Type * getElementType() const
Return the element type of the accessed array wrt. this access.
Definition ScopInfo.h:859
ScopStmt * getStatement() const
Get the statement that contains this memory access.
Definition ScopInfo.h:1026
void setNewAccessRelation(isl::map NewAccessRelation)
Set the updated access relation read from JSCOP file.
const SCEV * getDimensionSize(unsigned Dim) const
Return the size of dimension dim as SCEV*.
Definition ScopInfo.h:287
static const ScopArrayInfo * getFromId(isl::id Id)
Access the ScopArrayInfo associated with an isl Id.
Definition ScopInfo.cpp:381
isl::id getBasePtrId() const
Return the isl id for the base pointer.
Definition ScopInfo.cpp:339
Scop * getParent()
Definition ScopInfo.h:1523
size_t size() const
Definition ScopInfo.h:1519
isl::id getDomainId() const
Get the id of the iteration domain space.
bool isRegionStmt() const
Return true if this statement represents a whole region.
Definition ScopInfo.h:1328
isl::set getDomain() const
Get the iteration domain of this ScopStmt.
void addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop, std::vector< Instruction * > Instructions)
Create a new SCoP statement for BB.
ScopArrayInfo * createScopArrayInfo(Type *ElementType, const std::string &BaseName, const std::vector< unsigned > &Sizes)
Create an array and return the corresponding ScopArrayInfo object.
Function & getFunction() const
Return the function this SCoP is in.
Definition ScopInfo.h:2087
A()
B()
#define S(TYPE, NAME)
#define isl_set
enum isl_schedule_node_type isl_schedule_node_get_type(__isl_keep isl_schedule_node *node)
#define assert(exp)
boolean manage(isl_bool val)
llvm::SmallVector< MemoryAccess *, 32 > getAccessesInOrder(ScopStmt &Stmt)
Return a vector that contains MemoryAccesses in the order in which they are executed.
Definition Simplify.cpp:765
@ Value
MemoryKind::Value: Models an llvm::Value.
Definition ScopInfo.h:149
isl::schedule_node applyRegisterTiling(isl::schedule_node Node, llvm::ArrayRef< int > TileSizes, int DefaultTileSize)
Tile a schedule node and unroll point loops.
isl::val getConstant(isl::pw_aff PwAff, bool Max, bool Min)
If PwAff maps to a constant, return said constant.
Definition ISLTools.cpp:552
isl::map makeIdentityMap(const isl::set &Set, bool RestrictDomain)
Construct an identity map for the given domain values.
Definition ISLTools.cpp:182
llvm::iota_range< unsigned > rangeIslSize(unsigned Begin, isl::size End)
Check that End is valid and return an iterator from Begin to End.
Definition ISLTools.cpp:597
isl::schedule_node tryOptimizeMatMulPattern(isl::schedule_node Node, const llvm::TargetTransformInfo *TTI, const Dependences *D)
Apply the BLIS matmul optimization pattern if possible.
isl::union_set getIsolateOptions(isl::set IsolateDomain, unsigned OutDimsNum)
Create an isl::union_set, which describes the isolate option based on IsolateDomain.
isl::schedule_node tileNode(isl::schedule_node Node, const char *Identifier, llvm::ArrayRef< int > TileSizes, int DefaultTileSize)
Tile a schedule node.
isl::union_set getDimOptions(isl::ctx Ctx, const char *Option)
Create an isl::union_set, which describes the specified option for the dimension of the current node.
llvm::APInt APIntFromVal(__isl_take isl_val *Val)
Translate isl_val to llvm::APInt.
Definition GICHelper.cpp:51
isl::set getPartialTilePrefixes(isl::set ScheduleRange, int VectorWidth)
Build the desired set of partial tile prefixes.
__isl_export isl_size isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_multi_union_pw_aff * isl_schedule_node_band_get_partial_schedule(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_schedule_node * isl_schedule_node_band_split(__isl_take isl_schedule_node *node, int pos)
__isl_give isl_union_map * isl_schedule_node_band_get_partial_schedule_union_map(__isl_keep isl_schedule_node *node)
__isl_give isl_schedule_node * isl_schedule_node_delete(__isl_take isl_schedule_node *node)
isl_schedule_node_type
@ isl_schedule_node_mark
@ isl_schedule_node_filter
@ isl_schedule_node_domain
@ isl_schedule_node_band
@ isl_schedule_node_leaf
__isl_give isl_set * isl_set_fix_val(__isl_take isl_set *set, enum isl_dim_type type, unsigned pos, __isl_take isl_val *v)
Definition isl_map.c:6702
@ isl_dim_set
Definition space_type.h:18
static TupleKindPtr Domain("Domain")
static TupleKindPtr Ctx
static std::vector< std::string > intersect(const std::vector< std::string > &v1, const std::vector< std::string > &v2)
isl_size isl_union_map_n_map(__isl_keep isl_union_map *umap)
isl_size isl_union_set_n_set(__isl_keep isl_union_set *uset)