Polly 23.0.0git
isl_ast_codegen.c
Go to the documentation of this file.
1/*
2 * Copyright 2012-2014 Ecole Normale Superieure
3 * Copyright 2014 INRIA Rocquencourt
4 *
5 * Use of this software is governed by the MIT license
6 *
7 * Written by Sven Verdoolaege,
8 * Ecole Normale Superieure, 45 rue d’Ulm, 75230 Paris, France
9 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
10 * B.P. 105 - 78153 Le Chesnay, France
11 */
12
13#include <limits.h>
14#include <isl/id.h>
15#include <isl/val.h>
16#include <isl/space.h>
17#include <isl/aff.h>
18#include <isl/constraint.h>
19#include <isl/set.h>
20#include <isl/ilp.h>
21#include <isl/union_set.h>
22#include <isl/union_map.h>
23#include <isl/schedule_node.h>
24#include <isl/options.h>
25#include <isl_sort.h>
26#include <isl_tarjan.h>
27#include <isl_ast_private.h>
28#include <isl_ast_build_expr.h>
31
32/* Try and reduce the number of disjuncts in the representation of "set",
33 * without dropping explicit representations of local variables.
34 */
/* NOTE(review): doxygen text dump — the signature (source line 35) and
 * lines 44-47 are missing from this excerpt.  Per the comment above, this
 * coalesces "set"; "save_preserve" suggests the missing lines save/set/
 * restore a ctx option around the coalesce — confirm against the
 * original isl_ast_codegen.c. */
36{
37 isl_ctx *ctx;
38 int save_preserve;
39
40 if (!set)
41 return NULL;
42
43 ctx = isl_set_get_ctx(set);
48 return set;
49}
50
51/* Data used in generate_domain.
52 *
53 * "build" is the input build.
54 * "list" collects the results.
55 */
/* NOTE(review): dump is missing source lines 56-57, which per the comment
 * above open "struct isl_generate_domain_data" and declare its "build"
 * field; only the "list" field is visible here. */
58
59 isl_ast_graft_list *list;
60};
61
62static __isl_give isl_ast_graft_list *generate_next_level(
63 __isl_take isl_union_map *executed,
65static __isl_give isl_ast_graft_list *generate_code(
67 int internal);
68
69/* Generate an AST for a single domain based on
70 * the (non single valued) inverse schedule "executed".
71 *
72 * We extend the schedule with the iteration domain
73 * and continue generating through a call to generate_code.
74 *
75 * In particular, if executed has the form
76 *
77 * S -> D
78 *
79 * then we continue generating code on
80 *
81 * [S -> D] -> D
82 *
83 * The extended inverse schedule is clearly single valued
84 * ensuring that the nested generate_code will not reach this function,
85 * but will instead create calls to all elements of D that need
86 * to be executed from the current schedule domain.
87 */
/* NOTE(review): dump is missing source lines 88 (first line of the
 * signature), 91 and 97 — "identity" is used on line 98 but its
 * declaration and initialization are among the missing lines.
 * Per the comment above, line 98 extends the inverse schedule
 * S -> D to [S -> D] -> D so the recursive generate_code call
 * sees a single-valued schedule. */
89 struct isl_generate_domain_data *data)
90{
92 isl_ast_build *build;
93 isl_ast_graft_list *list;
94
95 build = isl_ast_build_copy(data->build);
96
98 executed = isl_map_domain_product(executed, identity);
99
100 list = generate_code(isl_union_map_from_map(executed), build, 1);
101
102 data->list = isl_ast_graft_list_concat(data->list, list);
103
104 return isl_stat_ok;
105}
106
107/* Call the at_each_domain callback, if requested by the user,
108 * after recording the current inverse schedule in the build.
109 */
/* NOTE(review): dump is missing source lines 110 (first line of the
 * signature) and 120 (the second argument of the
 * isl_ast_build_set_executed call started on line 119). */
111 __isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
112{
113 if (!graft || !build)
114 return isl_ast_graft_free(graft);
115 if (!build->at_each_domain)
116 return graft;
117
118 build = isl_ast_build_copy(build);
119 build = isl_ast_build_set_executed(build,
121 if (!build)
122 return isl_ast_graft_free(graft);
123
/* The user callback may replace graft->node; a NULL result frees the graft. */
124 graft->node = build->at_each_domain(graft->node,
125 build, build->at_each_domain_user);
126 isl_ast_build_free(build);
127
128 if (!graft->node)
129 graft = isl_ast_graft_free(graft);
130
131 return graft;
132}
133
134/* Generate a call expression for the single executed
135 * domain element "executed" and put a guard around it based on its (simplified)
136 * domain.
137 *
138 * At this stage, any pending constraints in the build can no longer
139 * be simplified with respect to any enforced constraints since
140 * the call node does not have any enforced constraints.
141 * Since all pending constraints not covered by any enforced constraints
142 * will be added as a guard to the graft in create_node_scaled,
143 * even in the eliminated case, the pending constraints
144 * can be considered to have been generated by outer constructs.
145 *
146 * If the user has set an at_each_domain callback, it is called
147 * on the constructed call expression node.
148 */
/* NOTE(review): dump is missing source lines 149 (first line of the
 * signature), 160 and 162 — "domain" is tested for disjointness on
 * line 161, but its computation (160) and release (162) are among the
 * missing lines. */
150 struct isl_generate_domain_data *data)
151{
152 isl_bool disjoint;
153 isl_ast_build *build;
154 isl_ast_graft *graft;
155 isl_ast_graft_list *list;
156 isl_set *guard, *pending, *domain;
157
158 guard = isl_map_domain(isl_map_copy(executed));
159 guard = isl_set_compute_divs(guard);
161 disjoint = isl_set_is_disjoint(guard, domain);
163
/* Nothing to add if the domain of "executed" is disjoint from "domain". */
164 if (disjoint < 0 || disjoint) {
165 isl_set_free(guard);
166 isl_map_free(executed);
167 return isl_stat_non_error_bool(disjoint);
168 }
169
170 build = isl_ast_build_copy(data->build);
171 pending = isl_ast_build_get_pending(build);
172 build = isl_ast_build_replace_pending_by_guard(build, pending);
173
174 guard = isl_set_coalesce_preserve(guard);
175 guard = isl_set_gist(guard, isl_ast_build_get_generated(build));
176 guard = isl_ast_build_specialize(build, guard);
177
178 graft = isl_ast_graft_alloc_domain(isl_map_copy(executed), build);
179 graft = at_each_domain(graft, executed, build);
180 isl_ast_build_free(build);
181 isl_map_free(executed);
182 graft = isl_ast_graft_add_guard(graft, guard, data->build);
183
184 list = isl_ast_graft_list_from_ast_graft(graft);
185 data->list = isl_ast_graft_list_concat(data->list, list);
186
187 return isl_stat_ok;
188}
189
190/* Generate an AST for a single domain based on
191 * the inverse schedule "executed" and add it to data->list.
192 *
193 * If there is more than one domain element associated to the current
194 * schedule "time", then we need to continue the generation process
195 * in generate_non_single_valued.
196 * Note that the inverse schedule being single-valued may depend
197 * on constraints that are only available in the original context
198 * domain specified by the user. We therefore first introduce
199 * some of the constraints of data->build->domain. In particular,
200 * we intersect with a single-disjunct approximation of this set.
201 * We perform this approximation to avoid further splitting up
202 * the executed relation, possibly introducing a disjunctive guard
203 * on the statement.
204 *
205 * Otherwise, call add_domain to generate a call expression (with guard) and
206 * to call the at_each_domain callback, if any.
207 *
208 * Coalesce the inverse schedule before checking for single-valuedness.
209 * Skip this if the inverse schedule is obviously single-valued.
210 */
/* NOTE(review): this is the isl_union_map_foreach_map callback used by
 * generate_inner_level (see line 324 below).  The dump is missing source
 * lines 211 (the signature), 214 and 217-218 (declaration and computation
 * of "domain") and 228 (the first, plain single-valuedness test that
 * assigns "sv" before the check on line 229). */
212{
213 struct isl_generate_domain_data *data = user;
215 int empty, sv;
216
219 executed = isl_map_intersect_domain(executed, domain);
220 empty = isl_map_is_empty(executed);
221 if (empty < 0)
222 goto error;
223 if (empty) {
224 isl_map_free(executed);
225 return isl_stat_ok;
226 }
227
229 if (sv < 0)
230 goto error;
231 if (sv)
232 return add_domain(executed, data);
233
/* Coalesce before the (more expensive) exact single-valuedness test. */
234 executed = isl_map_coalesce(executed);
235 sv = isl_map_is_single_valued(executed);
236 if (sv < 0)
237 goto error;
238 if (!sv)
239 return generate_non_single_valued(executed, data);
240
241 return add_domain(executed, data);
242error:
243 isl_map_free(executed);
244 return isl_stat_error;
245}
246
247/* Call build->create_leaf to a create "leaf" node in the AST,
248 * encapsulate the result in an isl_ast_graft and return the result
249 * as a 1-element list.
250 *
251 * Note that the node returned by the user may be an entire tree.
252 *
253 * Since the node itself cannot enforce any constraints, we turn
254 * all pending constraints into guards and add them to the resulting
255 * graft to ensure that they will be generated.
256 *
257 * Before we pass control to the user, we first clear some information
258 * from the build that is (presumbably) only meaningful
259 * for the current code generation.
260 * This includes the create_leaf callback itself, so we make a copy
261 * of the build first.
262 */
/* NOTE(review): dump is missing source lines 264 (second line of the
 * signature), 271 (initialization of "guard", which is used on line 274)
 * and 283 (presumably a cleanup/free before the return — confirm against
 * the original source). */
263static __isl_give isl_ast_graft_list *call_create_leaf(
265{
266 isl_set *guard;
267 isl_ast_node *node;
268 isl_ast_graft *graft;
269 isl_ast_build *user_build;
270
272 user_build = isl_ast_build_copy(build);
273 user_build = isl_ast_build_replace_pending_by_guard(user_build,
274 isl_set_copy(guard));
275 user_build = isl_ast_build_set_executed(user_build, executed);
276 user_build = isl_ast_build_clear_local_info(user_build);
277 if (!user_build)
278 node = NULL;
279 else
280 node = build->create_leaf(user_build, build->create_leaf_user);
281 graft = isl_ast_graft_alloc(node, build);
282 graft = isl_ast_graft_add_guard(graft, guard, build);
284 return isl_ast_graft_list_from_ast_graft(graft);
285}
286
287static __isl_give isl_ast_graft_list *build_ast_from_child(
289 __isl_take isl_union_map *executed);
290
291/* Generate an AST after having handled the complete schedule
292 * of this call to the code generator or the complete band
293 * if we are generating an AST from a schedule tree.
294 *
295 * If we are inside a band node, then move on to the child of the band.
296 *
297 * If the user has specified a create_leaf callback, control
298 * is passed to the user in call_create_leaf.
299 *
300 * Otherwise, we generate one or more calls for each individual
301 * domain in generate_domain.
302 */
/* NOTE(review): dump is missing source lines 304 (the parameter list),
 * 312 and 314-315 (the branch that detects a schedule-tree band and
 * fetches the "node" used on line 316) and 329. */
303static __isl_give isl_ast_graft_list *generate_inner_level(
305{
306 isl_ctx *ctx;
307 struct isl_generate_domain_data data = { build };
308
309 if (!build || !executed)
310 goto error;
311
313 isl_schedule_node *node;
316 return build_ast_from_child(build, node, executed);
317 }
318
319 if (build->create_leaf)
320 return call_create_leaf(executed, build);
321
322 ctx = isl_union_map_get_ctx(executed);
323 data.list = isl_ast_graft_list_alloc(ctx, 0);
324 if (isl_union_map_foreach_map(executed, &generate_domain, &data) < 0)
325 data.list = isl_ast_graft_list_free(data.list);
326
/* "if (0) error:" idiom: the error label falls through to shared cleanup. */
327 if (0)
328error: data.list = NULL;
330 isl_union_map_free(executed);
331 return data.list;
332}
333
334/* Call the before_each_for callback, if requested by the user.
335 */
/* NOTE(review): dump is missing source lines 336-337 (the signature),
 * 343 (presumably the early return when no before_each_for callback is
 * set) and 345 — "id" is attached as an annotation on line 346 but the
 * line assigning it (the callback invocation) is missing. */
338{
339 isl_id *id;
340
341 if (!node || !build)
342 return isl_ast_node_free(node);
344 return node;
346 node = isl_ast_node_set_annotation(node, id);
347 return node;
348}
349
350/* Call the after_each_for callback, if requested by the user.
351 */
/* NOTE(review): dump is missing source lines 352-353 (the signature)
 * and 360 (the continuation of the callback-argument list started on
 * line 359). */
354{
355 if (!graft || !build)
356 return isl_ast_graft_free(graft);
357 if (!build->after_each_for)
358 return graft;
/* The callback may replace graft->node; a NULL result frees the graft. */
359 graft->node = build->after_each_for(graft->node, build,
361 if (!graft->node)
362 return isl_ast_graft_free(graft);
363 return graft;
364}
365
366/* Plug in all the know values of the current and outer dimensions
367 * in the domain of "executed". In principle, we only need to plug
368 * in the known value of the current dimension since the values of
369 * outer dimensions have been plugged in already.
370 * However, it turns out to be easier to just plug in all known values.
371 */
378
379/* Check if the constraint "c" is a lower bound on dimension "pos",
380 * an upper bound, or independent of dimension "pos".
381 */
/* NOTE(review): dump is missing source lines 382 (the signature) and
 * 384/386 (the two conditions guarding the returns).  Per the comments
 * above and the sort order in cmp_constraint, the visible returns
 * presumably mean: 1 = lower bound on "pos", 2 = upper bound,
 * 0 = independent of "pos" — confirm against the original source. */
383{
385 return 1;
387 return 2;
388 return 0;
389}
390
391/* Compare the types of the constraints "a" and "b",
392 * resulting in constraints that are independent of "depth"
393 * to be sorted before the lower bounds on "depth", which in
394 * turn are sorted before the upper bounds on "depth".
395 */
/* NOTE(review): dump is missing source lines 396-397 (the signature);
 * the body is complete.  Sort key is constraint_type, so constraints
 * independent of *depth come first, then lower bounds, then upper
 * bounds (see the comment above). */
398{
399 int *depth = user;
400 int t1 = constraint_type(a, *depth);
401 int t2 = constraint_type(b, *depth);
402
403 return t1 - t2;
404}
405
406/* Extract a lower bound on dimension "pos" from constraint "c".
407 *
408 * If the constraint is of the form
409 *
410 * a x + f(...) >= 0
411 *
412 * then we essentially return
413 *
414 * l = ceil(-f(...)/a)
415 *
416 * However, if the current dimension is strided, then we need to make
417 * sure that the lower bound we construct is of the form
418 *
419 * f + s a
420 *
421 * with f the offset and s the stride.
422 * We therefore compute
423 *
424 * f + s * ceil((l - f)/s)
425 */
452
453/* Return the exact lower bound (or upper bound if "upper" is set)
454 * of "domain" as a piecewise affine expression.
455 *
456 * If we are computing a lower bound (of a strided dimension), then
457 * we need to make sure it is of the form
458 *
459 * f + s a
460 *
461 * where f is the offset and s is the stride.
462 * We therefore need to include the stride constraint before computing
463 * the minimum.
464 */
/* NOTE(review): dump is missing most of this body (source lines 465,
 * 471, 473, 475-476, 478, 480 and 482-486): the stride handling in the
 * !upper branch and the actual min/max computations that assign "pa"
 * are not visible here — consult the original source before relying on
 * any detail of this function. */
466 __isl_keep isl_ast_build *build, int upper)
467{
468 isl_set *stride;
469 isl_map *it_map;
470 isl_pw_aff *pa;
472
474 if (!upper) {
477 }
479 if (upper)
481 else
487
488 return pa;
489}
490
491/* Callback for sorting the isl_pw_aff_list passed to reduce_list and
492 * remove_redundant_lower_bounds.
493 */
/* NOTE(review): dump is missing source line 494 (first line of the
 * signature).  Simple comparison wrapper; "user" is unused. */
495 void *user)
496{
497 return isl_pw_aff_plain_cmp(a, b);
498}
499
500/* Given a list of lower bounds "list", remove those that are redundant
501 * with respect to the other bounds in "list" and the domain of "build".
502 *
503 * We first sort the bounds in the same way as they would be sorted
504 * by set_for_node_expressions so that we can try and remove the last
505 * bounds first.
506 *
507 * For a lower bound to be effective, there needs to be at least
508 * one domain element for which it is larger than all other lower bounds.
509 * For each lower bound we therefore intersect the domain with
510 * the conditions that it is larger than all other bounds and
511 * check whether the result is empty. If so, the bound can be removed.
512 */
/* NOTE(review): dump is missing source lines 513 (first line of the
 * signature), 518 and 528 (declaration and initialization of "domain",
 * used on line 535) and 563/567 (presumably the frees of "domain" on
 * the success and error paths). */
514 __isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
515{
516 int i, j;
517 isl_size n;
519
520 list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
521
522 n = isl_pw_aff_list_n_pw_aff(list);
523 if (n < 0)
524 return isl_pw_aff_list_free(list);
525 if (n <= 1)
526 return list;
527
529
/* Iterate backwards so set_for_node_expressions-last bounds are tried first. */
530 for (i = n - 1; i >= 0; --i) {
531 isl_pw_aff *pa_i;
532 isl_set *domain_i;
533 int empty;
534
535 domain_i = isl_set_copy(domain);
536 pa_i = isl_pw_aff_list_get_pw_aff(list, i);
537
/* Restrict to the part of the domain where pa_i beats every other bound. */
538 for (j = 0; j < n; ++j) {
539 isl_pw_aff *pa_j;
540 isl_set *better;
541
542 if (j == i)
543 continue;
544
545 pa_j = isl_pw_aff_list_get_pw_aff(list, j);
546 better = isl_pw_aff_gt_set(isl_pw_aff_copy(pa_i), pa_j);
547 domain_i = isl_set_intersect(domain_i, better);
548 }
549
550 empty = isl_set_is_empty(domain_i);
551
552 isl_set_free(domain_i);
553 isl_pw_aff_free(pa_i);
554
555 if (empty < 0)
556 goto error;
557 if (!empty)
558 continue;
/* pa_i is never the strictest bound: it is redundant and can be dropped. */
559 list = isl_pw_aff_list_drop(list, i, 1);
560 n--;
561 }
562
564
565 return list;
566error:
568 return isl_pw_aff_list_free(list);
569}
570
571/* Extract a lower bound on dimension "pos" from each constraint
572 * in "constraints" and return the list of lower bounds.
573 * If "constraints" has zero elements, then we extract a lower bound
574 * from "domain" instead.
575 *
576 * If the current dimension is strided, then the lower bound
577 * is adjusted by lower_bound to match the stride information.
578 * This modification may make one or more lower bounds redundant
579 * with respect to the other lower bounds. We therefore check
580 * for this condition and remove the redundant lower bounds.
581 */
/* NOTE(review): dump is missing source lines 584 (the signature line
 * declaring the "domain" and "build" parameters), 603 (initialization
 * of "ctx"), 608 and 612 (declaration and release of "c") and 616-617
 * — per the comment above, the missing tail presumably calls
 * remove_redundant_lower_bounds in the strided case. */
582static __isl_give isl_pw_aff_list *lower_bounds(
583 __isl_keep isl_constraint_list *constraints, int pos,
585{
586 isl_ctx *ctx;
587 isl_pw_aff_list *list;
588 int i;
589 isl_size n;
590
591 if (!build)
592 return NULL;
593
594 n = isl_constraint_list_n_constraint(constraints);
595 if (n < 0)
596 return NULL;
/* No explicit lower-bound constraints: derive one from "domain" instead. */
597 if (n == 0) {
598 isl_pw_aff *pa;
599 pa = exact_bound(domain, build, 0);
600 return isl_pw_aff_list_from_pw_aff(pa);
601 }
602
604 list = isl_pw_aff_list_alloc(ctx,n);
605
606 for (i = 0; i < n; ++i) {
607 isl_aff *aff;
609
610 c = isl_constraint_list_get_constraint(constraints, i);
611 aff = lower_bound(c, pos, build);
613 list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
614 }
615
618
619 return list;
620}
621
622/* Extract an upper bound on dimension "pos" from each constraint
623 * in "constraints" and return the list of upper bounds.
624 * If "constraints" has zero elements, then we extract an upper bound
625 * from "domain" instead.
626 */
/* NOTE(review): dump is missing source lines 629 (the signature line
 * declaring the "domain" and "build" parameters), 645 (initialization
 * of "ctx"), 650 (presumably the declaration of "c") and 653-655 —
 * "aff" is added to the list on line 656 but the lines computing it
 * from "c" (and releasing "c") are not visible. */
627static __isl_give isl_pw_aff_list *upper_bounds(
628 __isl_keep isl_constraint_list *constraints, int pos,
630{
631 isl_ctx *ctx;
632 isl_pw_aff_list *list;
633 int i;
634 isl_size n;
635
636 n = isl_constraint_list_n_constraint(constraints);
637 if (n < 0)
638 return NULL;
/* No explicit upper-bound constraints: derive one from "domain" instead. */
639 if (n == 0) {
640 isl_pw_aff *pa;
641 pa = exact_bound(domain, build, 1);
642 return isl_pw_aff_list_from_pw_aff(pa);
643 }
644
646 list = isl_pw_aff_list_alloc(ctx,n);
647
648 for (i = 0; i < n; ++i) {
649 isl_aff *aff;
651
652 c = isl_constraint_list_get_constraint(constraints, i);
656 list = isl_pw_aff_list_add(list, isl_pw_aff_from_aff(aff));
657 }
658
659 return list;
660}
661
662/* Return an isl_ast_expr that performs the reduction of type "type"
663 * on AST expressions corresponding to the elements in "list".
664 *
665 * The list is assumed to contain at least one element.
666 * If the list contains exactly one element, then the returned isl_ast_expr
667 * simply computes that affine expression.
668 * If the list contains more than one element, then we sort it
669 * using a fairly arbitrary but hopefully reasonably stable order.
670 */
/* NOTE(review): dump is missing source lines 671 (first line of the
 * signature, carrying the "type" parameter used on line 688), 684
 * (start of the single-element return expression whose argument is on
 * line 685) and 698 (start of the call assigning "expr_i"). */
672 __isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
673{
674 int i;
675 isl_size n;
676 isl_ctx *ctx;
677 isl_ast_expr *expr;
678
679 n = isl_pw_aff_list_n_pw_aff(list);
680 if (n < 0)
681 return NULL;
682
683 if (n == 1)
685 isl_pw_aff_list_get_pw_aff(list, 0));
686
687 ctx = isl_pw_aff_list_get_ctx(list);
688 expr = isl_ast_expr_alloc_op(ctx, type, n);
689
/* Sort a copy for a stable argument order (see comment above). */
690 list = isl_pw_aff_list_copy(list);
691 list = isl_pw_aff_list_sort(list, &reduce_list_cmp, NULL);
692 if (!list)
693 return isl_ast_expr_free(expr);
694
695 for (i = 0; i < n; ++i) {
696 isl_ast_expr *expr_i;
697
699 isl_pw_aff_list_get_pw_aff(list, i));
700 expr = isl_ast_expr_op_add_arg(expr, expr_i);
701 }
702
703 isl_pw_aff_list_free(list);
704 return expr;
705}
706
707/* Add guards implied by the "generated constraints",
708 * but not (necessarily) enforced by the generated AST to "guard".
709 * In particular, if there is any stride constraints,
710 * then add the guard implied by those constraints.
711 * If we have generated a degenerate loop, then add the guard
712 * implied by "bounds" on the outer dimensions, i.e., the guard
713 * that ensures that the single value actually exists.
714 * Since there may also be guards implied by a combination
715 * of these constraints, we first combine them before
716 * deriving the implied constraints.
717 */
/* NOTE(review): dump is missing source lines 718/720 (first and last
 * lines of the signature), 727 (assignment of "depth"), 739 (start of
 * the call whose arguments appear on line 740), 746 ("set" is
 * intersected on line 747 but the line assigning it — presumably the
 * stride constraint — is missing) and 751. */
719 int degenerate, __isl_keep isl_basic_set *bounds,
721{
722 isl_size depth;
723 isl_bool has_stride;
724 isl_space *space;
725 isl_set *dom, *set;
726
728 has_stride = isl_ast_build_has_stride(build);
729 if (depth < 0 || has_stride < 0)
730 return isl_set_free(guard);
/* Nothing implied beyond the generated AST in the plain case. */
731 if (!has_stride && !degenerate)
732 return guard;
733
734 space = isl_basic_set_get_space(bounds);
735 dom = isl_set_universe(space);
736
737 if (degenerate) {
738 bounds = isl_basic_set_copy(bounds);
740 bounds, isl_dim_set, depth, 1);
741 set = isl_set_from_basic_set(bounds);
742 dom = isl_set_intersect(dom, set);
743 }
744
745 if (has_stride) {
747 dom = isl_set_intersect(dom, set);
748 }
749
/* Project out the current dimension to obtain the implied outer guard. */
750 dom = isl_set_eliminate(dom, isl_dim_set, depth, 1);
752 guard = isl_set_intersect(guard, dom);
753
754 return guard;
755}
756
757/* Update "graft" based on "sub_build" for the degenerate case.
758 *
759 * "build" is the build in which graft->node was created
760 * "sub_build" contains information about the current level itself,
761 * including the single value attained.
762 *
763 * We set the initialization part of the for loop to the single
764 * value attained by the current dimension.
765 * The increment and condition are not strictly needed as they are known
766 * to be "1" and "iterator <= value" respectively.
767 */
/* NOTE(review): dump is missing source lines 768-769 (the signature),
 * 773 (declaration of "init") and 780 — "init" is installed as the for
 * initializer on line 781, but the line deriving it from "value" is
 * not visible. */
770 __isl_keep isl_ast_build *sub_build)
771{
772 isl_pw_aff *value;
774
775 if (!graft || !sub_build)
776 return isl_ast_graft_free(graft);
777
/* The single value attained by the current dimension (see comment above). */
778 value = isl_pw_aff_copy(sub_build->value);
779
781 graft->node = isl_ast_node_for_set_init(graft->node, init);
782 if (!graft->node)
783 return isl_ast_graft_free(graft);
784
785 return graft;
786}
787
788/* Return the intersection of constraints in "list" as a set.
789 */
/* NOTE(review): dump is missing source lines 790 (first line of the
 * signature) and 804/809 — the starts of the two
 * isl_basic_set_from_constraint calls whose closing argument lines
 * (805 and 810) are visible below. */
791 __isl_keep isl_constraint_list *list)
792{
793 int i;
794 isl_size n;
795 isl_basic_set *bset;
796
797 n = isl_constraint_list_n_constraint(list);
798 if (n < 0)
799 return NULL;
800 if (n < 1)
801 isl_die(isl_constraint_list_get_ctx(list), isl_error_internal,
802 "expecting at least one constraint", return NULL);
803
805 isl_constraint_list_get_constraint(list, 0));
806 for (i = 1; i < n; ++i) {
807 isl_basic_set *bset_i;
808
810 isl_constraint_list_get_constraint(list, i));
811 bset = isl_basic_set_intersect(bset, bset_i);
812 }
813
814 return isl_set_from_basic_set(bset);
815}
816
817/* Compute the constraints on the outer dimensions enforced by
818 * graft->node and add those constraints to graft->enforced,
819 * in case the upper bound is expressed as a set "upper".
820 *
821 * In particular, if l(...) is a lower bound in "lower", and
822 *
823 * -a i + f(...) >= 0 or a i <= f(...)
824 *
825 * is an upper bound ocnstraint on the current dimension i,
826 * then the for loop enforces the constraint
827 *
828 * -a l(...) + f(...) >= 0 or a l(...) <= f(...)
829 *
830 * We therefore simply take each lower bound in turn, plug it into
831 * the upper bounds and compute the intersection over all lower bounds.
832 *
833 * If a lower bound is a rational expression, then
834 * isl_basic_set_preimage_multi_aff will force this rational
835 * expression to have only integer values. However, the loop
836 * itself does not enforce this integrality constraint. We therefore
837 * use the ceil of the lower bounds instead of the lower bounds themselves.
838 * Other constraints will make sure that the for loop is only executed
839 * when each of the lower bounds attains an integral value.
840 * In particular, potentially rational values only occur in
841 * lower_bound if the offset is a (seemingly) rational expression,
842 * but then outer conditions will make sure that this rational expression
843 * only attains integer values.
844 */
/* NOTE(review): dump is missing source lines 845-846 (the signature),
 * 851 and 863 (declaration and initialization of "pma", copied on line
 * 873), 868 (presumably the declaration of "hull", assigned on line
 * 877), 872 (per the comment above, presumably the ceil of the lower
 * bound) and 881 (presumably the release of "pma"). */
847 __isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
848{
849 isl_space *space;
850 isl_basic_set *enforced;
852 int i;
853 isl_size n;
854
855 n = isl_pw_aff_list_n_pw_aff(lower);
856 if (!graft || n < 0)
857 return isl_ast_graft_free(graft);
858
859 space = isl_set_get_space(upper);
860 enforced = isl_basic_set_universe(isl_space_copy(space));
861
862 space = isl_space_map_from_set(space);
864
/* Plug each lower bound into the upper-bound set (see comment above). */
865 for (i = 0; i < n; ++i) {
866 isl_pw_aff *pa;
867 isl_set *enforced_i;
869 isl_pw_multi_aff *pma_i;
870
871 pa = isl_pw_aff_list_get_pw_aff(lower, i);
873 pma_i = isl_pw_multi_aff_copy(pma);
874 pma_i = isl_pw_multi_aff_set_pw_aff(pma_i, pos, pa);
875 enforced_i = isl_set_copy(upper);
876 enforced_i = isl_set_preimage_pw_multi_aff(enforced_i, pma_i);
877 hull = isl_set_simple_hull(enforced_i);
878 enforced = isl_basic_set_intersect(enforced, hull);
879 }
880
882
883 graft = isl_ast_graft_enforce(graft, enforced);
884
885 return graft;
886}
887
888/* Compute the constraints on the outer dimensions enforced by
889 * graft->node and add those constraints to graft->enforced,
890 * in case the upper bound is expressed as
891 * a list of affine expressions "upper".
892 *
893 * The enforced condition is that each lower bound expression is less
894 * than or equal to each upper bound expression.
895 */
/* NOTE(review): dump is missing source lines 896-897 (the signature);
 * the body is complete.  The enforced set is the simple hull of
 * "every lower bound <= every upper bound" (see comment above). */
898 __isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
899{
900 isl_set *cond;
901 isl_basic_set *enforced;
902
903 lower = isl_pw_aff_list_copy(lower);
904 upper = isl_pw_aff_list_copy(upper);
905 cond = isl_pw_aff_list_le_set(lower, upper);
906 enforced = isl_set_simple_hull(cond);
907 graft = isl_ast_graft_enforce(graft, enforced);
908
909 return graft;
910}
911
912/* Does "aff" have a negative constant term?
913 */
/* NOTE(review): dump is missing source lines 914 (first line of the
 * signature) and 920 — "v" is tested on line 921, but the line
 * assigning it (presumably extracting the constant term of "aff") is
 * not visible. */
915 __isl_keep isl_aff *aff, void *user)
916{
917 isl_bool is_neg;
918 isl_val *v;
919
921 is_neg = isl_val_is_neg(v);
922 isl_val_free(v);
923
924 return is_neg;
925}
926
927/* Does "pa" have a negative constant term over its entire domain?
928 */
934
935/* Does each element in "list" have a negative constant term?
936 */
937static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
938{
939 return isl_pw_aff_list_every(list, &pw_aff_constant_is_negative, NULL);
940}
941
942/* Add 1 to each of the elements in "list", where each of these elements
943 * is defined over the internal schedule space of "build".
944 */
/* NOTE(review): dump is missing source lines 959-961 (the construction
 * of "one", the constant-1 expression released on line 970) and 966
 * (the line adding "one" to "pa" inside the loop). */
945static __isl_give isl_pw_aff_list *list_add_one(
946 __isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
947{
948 int i;
949 isl_size n;
950 isl_space *space;
951 isl_aff *aff;
952 isl_pw_aff *one;
953
954 n = isl_pw_aff_list_n_pw_aff(list);
955 if (n < 0)
956 return isl_pw_aff_list_free(list);
957
/* Internal schedule space of "build" (see comment above). */
958 space = isl_ast_build_get_space(build, 1);
962
963 for (i = 0; i < n; ++i) {
964 isl_pw_aff *pa;
965 pa = isl_pw_aff_list_get_pw_aff(list, i);
967 list = isl_pw_aff_list_set_pw_aff(list, i, pa);
968 }
969
970 isl_pw_aff_free(one);
971
972 return list;
973}
974
975/* Set the condition part of the for node graft->node in case
976 * the upper bound is represented as a list of piecewise affine expressions.
977 *
978 * In particular, set the condition to
979 *
980 * iterator <= min(list of upper bounds)
981 *
982 * If each of the upper bounds has a negative constant term, then
983 * set the condition to
984 *
985 * iterator < min(list of (upper bound + 1)s)
986 *
987 */
/* NOTE(review): dump is missing source lines 988 (first line of the
 * signature), 994 (initialization of "type", used on line 1010 —
 * presumably "<="), 999 (assignment of "neg"), 1004-1005 (the body of
 * the "neg" branch — per the comment above, presumably add one to each
 * bound and switch to "<") and 1008 (assignment of "bound" — presumably
 * the min-reduction of the list). */
989 __isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list,
991{
992 int neg;
993 isl_ast_expr *bound, *iterator, *cond;
995
996 if (!graft || !list)
997 return isl_ast_graft_free(graft);
998
1000 if (neg < 0)
1001 return isl_ast_graft_free(graft);
1002 list = isl_pw_aff_list_copy(list);
1003 if (neg) {
1006 }
1007
1009 iterator = isl_ast_expr_copy(graft->node->u.f.iterator);
1010 cond = isl_ast_expr_alloc_binary(type, iterator, bound);
1011 graft->node = isl_ast_node_for_set_cond(graft->node, cond);
1012
1013 isl_pw_aff_list_free(list);
1014 if (!graft->node)
1015 return isl_ast_graft_free(graft);
1016 return graft;
1017}
1018
1019/* Set the condition part of the for node graft->node in case
1020 * the upper bound is represented as a set.
1021 */
/* NOTE(review): dump is missing source lines 1022-1024 (the signature)
 * and 1031 — "cond" is installed on line 1032, but the line building it
 * from the upper-bound set is not visible. */
1025{
1026 isl_ast_expr *cond;
1027
1028 if (!graft)
1029 return NULL;
1030
1032 graft->node = isl_ast_node_for_set_cond(graft->node, cond);
1033 if (!graft->node)
1034 return isl_ast_graft_free(graft);
1035 return graft;
1036}
1037
1038/* Construct an isl_ast_expr for the increment (i.e., stride) of
1039 * the current dimension.
1040 */
/* NOTE(review): dump is missing source lines 1041 (the signature),
 * 1047 (assignment of "depth"), 1050 (assignment of "ctx") and 1052
 * (the condition guarding the constant-1 return — presumably "no
 * stride at the current depth"). */
1042{
1043 isl_size depth;
1044 isl_val *v;
1045 isl_ctx *ctx;
1046
1048 if (depth < 0)
1049 return NULL;
1051
1053 return isl_ast_expr_alloc_int_si(ctx, 1);
1054
1055 v = isl_ast_build_get_stride(build, depth);
1056 return isl_ast_expr_from_val(v);
1057}
1058
1059/* Should we express the loop condition as
1060 *
1061 * iterator <= min(list of upper bounds)
1062 *
1063 * or as a conjunction of constraints?
1064 *
1065 * The first is constructed from a list of upper bounds.
1066 * The second is constructed from a set.
1067 *
1068 * If there are no upper bounds in "constraints", then this could mean
1069 * that "domain" simply doesn't have an upper bound or that we didn't
1070 * pick any upper bound. In the first case, we want to generate the
1071 * loop condition as a(n empty) conjunction of constraints
1072 * In the second case, we will compute
1073 * a single upper bound from "domain" and so we use the list form.
1074 *
1075 * If there are upper bounds in "constraints",
1076 * then we use the list form iff the atomic_upper_bound option is set.
1077 */
/* NOTE(review): dump is missing source lines 1082 and 1084 — both
 * return expressions.  Per the comment above, the n_upper > 0 branch
 * presumably returns the atomic_upper_bound option of "ctx", and the
 * else branch a test on "domain"/"depth" — confirm against the
 * original source. */
1078static int use_upper_bound_list(isl_ctx *ctx, int n_upper,
1079 __isl_keep isl_set *domain, int depth)
1080{
1081 if (n_upper > 0)
1083 else
1085}
1086
1087/* Fill in the expressions of the for node in graft->node.
1088 *
1089 * In particular,
1090 * - set the initialization part of the loop to the maximum of the lower bounds
1091 * - extract the increment from the stride of the current dimension
1092 * - construct the for condition either based on a list of upper bounds
1093 * or on a set of upper bound constraints.
1094 */
/* NOTE(review): dump is missing source lines 1095/1098 (first and last
 * lines of the signature; 1098 presumably declares "upper_set" and
 * "build"), 1100 (declaration of "init") and 1105 — "init" is installed
 * on line 1106, but the line computing it (per the comment above, the
 * max of the lower bounds) is not visible. */
1096 __isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower,
1097 int use_list, __isl_keep isl_pw_aff_list *upper_list,
1099{
1101
1102 if (!graft)
1103 return NULL;
1104
1106 graft->node = isl_ast_node_for_set_init(graft->node, init);
1107 graft->node = isl_ast_node_for_set_inc(graft->node, for_inc(build));
1108
1109 if (!graft->node)
1110 graft = isl_ast_graft_free(graft);
1111
/* The condition comes either from a bound list or from a bound set. */
1112 if (use_list)
1113 graft = set_for_cond_from_list(graft, upper_list, build);
1114 else
1115 graft = set_for_cond_from_set(graft, upper_set, build);
1116
1117 return graft;
1118}
1119
1120/* Update "graft" based on "bounds" and "domain" for the generic,
1121 * non-degenerate, case.
1122 *
1123 * "c_lower" and "c_upper" contain the lower and upper bounds
1124 * that the loop node should express.
1125 * "domain" is the subset of the intersection of the constraints
1126 * for which some code is executed.
1127 *
1128 * There may be zero lower bounds or zero upper bounds in "constraints"
1129 * in case the list of constraints was created
1130 * based on the atomic option or based on separation with explicit bounds.
1131 * In that case, we use "domain" to derive lower and/or upper bounds.
1132 *
1133 * We first compute a list of one or more lower bounds.
1134 *
1135 * Then we decide if we want to express the condition as
1136 *
1137 * iterator <= min(list of upper bounds)
1138 *
1139 * or as a conjunction of constraints.
1140 *
1141 * The set of enforced constraints is then computed either based on
1142 * a list of upper bounds or on a set of upper bound constraints.
1143 * We do not compute any enforced constraints if we were forced
1144 * to compute a lower or upper bound using exact_bound. The domains
1145 * of the resulting expressions may imply some bounds on outer dimensions
1146 * that we do not want to appear in the enforced constraints since
1147 * they are not actually enforced by the corresponding code.
1148 *
1149 * Finally, we fill in the expressions of the for node.
1150 */
/* NOTE(review): dump is missing source lines 1151-1152 (first lines of
 * the signature), 1155 (last signature line, presumably declaring
 * "domain" and "build"), 1165 (assignment of "depth", tested on line
 * 1166) and 1185 (the else-branch statement executed when there are no
 * upper-bound constraints and no list is used). */
1153 __isl_take isl_constraint_list *c_lower,
1154 __isl_take isl_constraint_list *c_upper,
1156{
1157 isl_size depth;
1158 isl_ctx *ctx;
1159 isl_pw_aff_list *lower;
1160 int use_list;
1161 isl_set *upper_set = NULL;
1162 isl_pw_aff_list *upper_list = NULL;
1163 isl_size n_lower, n_upper;
1164
1166 if (!graft || !c_lower || !c_upper || depth < 0)
1167 goto error;
1168
1169 ctx = isl_ast_graft_get_ctx(graft);
1170
1171 n_lower = isl_constraint_list_n_constraint(c_lower);
1172 n_upper = isl_constraint_list_n_constraint(c_upper);
1173 if (n_lower < 0 || n_upper < 0)
1174 goto error;
1175
1176 use_list = use_upper_bound_list(ctx, n_upper, domain, depth);
1177
1178 lower = lower_bounds(c_lower, depth, domain, build);
1179
1180 if (use_list)
1181 upper_list = upper_bounds(c_upper, depth, domain, build);
1182 else if (n_upper > 0)
1183 upper_set = intersect_constraints(c_upper);
1184 else
1186
/* Skip enforced constraints when a bound came from exact_bound
 * (see comment above). */
1187 if (n_lower == 0 || n_upper == 0)
1188 ;
1189 else if (use_list)
1190 graft = set_enforced_from_list(graft, lower, upper_list);
1191 else
1192 graft = set_enforced_from_set(graft, lower, depth, upper_set);
1193
1194 graft = set_for_node_expressions(graft, lower, use_list, upper_list,
1195 upper_set, build);
1196
1197 isl_pw_aff_list_free(lower);
1198 isl_pw_aff_list_free(upper_list);
1199 isl_set_free(upper_set);
1200 isl_constraint_list_free(c_lower);
1201 isl_constraint_list_free(c_upper);
1202
1203 return graft;
1204error:
1205 isl_constraint_list_free(c_lower);
1206 isl_constraint_list_free(c_upper);
1207 return isl_ast_graft_free(graft);
1208}
1209
1210/* Internal data structure used inside count_constraints to keep
1211 * track of the number of constraints that are independent of dimension "pos",
1212 * the lower bounds in "pos" and the upper bounds in "pos".
1213 */
1221
1222/* Increment data->n_indep, data->lower or data->upper depending
1223 * on whether "c" is independent of dimensions data->pos,
1224 * a lower bound or an upper bound.
1225 */
1227{
1228 struct isl_ast_count_constraints_data *data = user;
1229
1231 data->n_lower++;
1232 else if (isl_constraint_is_upper_bound(c, isl_dim_set, data->pos))
1233 data->n_upper++;
1234 else
1235 data->n_indep++;
1236
1238
1239 return isl_stat_ok;
1240}
1241
1242/* Update "graft" based on "bounds" and "domain" for the generic,
1243 * non-degenerate, case.
1244 *
1246 * "list" represents the list of bounds that need to be encoded by
1246 * the for loop. Only the constraints that involve the iterator
1247 * are relevant here. The other constraints are taken care of by
1248 * the caller and are included in the generated constraints of "build".
1249 * "domain" is the subset of the intersection of the constraints
1250 * for which some code is executed.
1251 * "build" is the build in which graft->node was created.
1252 *
1253 * We separate lower bounds, upper bounds and constraints that
1254 * are independent of the loop iterator.
1255 *
1256 * The actual for loop bounds are generated in refine_generic_bounds.
1257 */
1259 __isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list,
1261{
1263 isl_size depth;
1264 isl_constraint_list *lower;
1265 isl_constraint_list *upper;
1266
1267 depth = isl_ast_build_get_depth(build);
1268 if (depth < 0)
1269 list = isl_constraint_list_free(list);
1270 if (!list)
1271 return isl_ast_graft_free(graft);
1272
1273 data.pos = depth;
1274
1275 list = isl_constraint_list_sort(list, &cmp_constraint, &data.pos);
1276 if (!list)
1277 return isl_ast_graft_free(graft);
1278
1279 data.n_indep = data.n_lower = data.n_upper = 0;
1280 if (isl_constraint_list_foreach(list, &count_constraints, &data) < 0) {
1281 isl_constraint_list_free(list);
1282 return isl_ast_graft_free(graft);
1283 }
1284
1285 lower = isl_constraint_list_drop(list, 0, data.n_indep);
1286 upper = isl_constraint_list_copy(lower);
1287 lower = isl_constraint_list_drop(lower, data.n_lower, data.n_upper);
1288 upper = isl_constraint_list_drop(upper, 0, data.n_lower);
1289
1290 return refine_generic_bounds(graft, lower, upper, domain, build);
1291}
1292
1293/* Update "graft" based on "bounds" and "domain" for the generic,
1294 * non-degenerate, case.
1295 *
1296 * "bounds" represents the bounds that need to be encoded by
1297 * the for loop (or a guard around the for loop).
1298 * "domain" is the subset of "bounds" for which some code is executed.
1299 * "build" is the build in which graft->node was created.
1300 *
1301 * We break up "bounds" into a list of constraints and continue with
1302 * refine_generic_split.
1303 */
1308{
1309 isl_constraint_list *list;
1310
1311 if (!build || !graft)
1312 return isl_ast_graft_free(graft);
1313
1314 list = isl_basic_set_get_constraint_list(bounds);
1315
1316 graft = refine_generic_split(graft, list, domain, build);
1317
1318 return graft;
1319}
1320
1321/* Create a for node for the current level.
1322 *
1323 * Mark the for node degenerate if "degenerate" is set.
1324 */
1326 int degenerate)
1327{
1328 isl_size depth;
1329 isl_id *id;
1330 isl_ast_node *node;
1331
1332 depth = isl_ast_build_get_depth(build);
1333 if (depth < 0)
1334 return NULL;
1335
1336 id = isl_ast_build_get_iterator_id(build, depth);
1337 node = isl_ast_node_alloc_for(id);
1338 if (degenerate)
1340
1341 return node;
1342}
1343
1344/* If the ast_build_exploit_nested_bounds option is set, then return
1345 * the constraints enforced by all elements in "list".
1346 * Otherwise, return the universe.
1347 */
1349 __isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
1350{
1351 isl_ctx *ctx;
1352 isl_space *space;
1353
1354 if (!list)
1355 return NULL;
1356
1357 ctx = isl_ast_graft_list_get_ctx(list);
1360
1361 space = isl_ast_build_get_space(build, 1);
1362 return isl_basic_set_universe(space);
1363}
1364
1365/* Return the pending constraints of "build" that are not already taken
1366 * care of (by a combination of "enforced" and the generated constraints
1367 * of "build").
1368 */
1370 __isl_keep isl_basic_set *enforced)
1371{
1372 isl_set *guard, *context;
1373
1374 guard = isl_ast_build_get_pending(build);
1378 return isl_set_gist(guard, context);
1379}
1380
1381/* Create an AST node for the current dimension based on
1382 * the schedule domain "bounds" and return the node encapsulated
1383 * in an isl_ast_graft.
1384 *
1385 * "executed" is the current inverse schedule, taking into account
1386 * the bounds in "bounds"
1387 * "domain" is the domain of "executed", with inner dimensions projected out.
1388 * It may be a strict subset of "bounds" in case "bounds" was created
1389 * based on the atomic option or based on separation with explicit bounds.
1390 *
1391 * "domain" may satisfy additional equalities that result
1392 * from intersecting "executed" with "bounds" in add_node.
1393 * It may also satisfy some global constraints that were dropped out because
1394 * we performed separation with explicit bounds.
1395 * The very first step is then to copy these constraints to "bounds".
1396 *
1397 * Since we may be calling before_each_for and after_each_for
1398 * callbacks, we record the current inverse schedule in the build.
1399 *
1400 * We consider three builds,
1401 * "build" is the one in which the current level is created,
1402 * "body_build" is the build in which the next level is created,
1403 * "sub_build" is essentially the same as "body_build", except that
1404 * the depth has not been increased yet.
1405 *
1406 * "build" already contains information (in strides and offsets)
1407 * about the strides at the current level, but this information is not
1408 * reflected in the build->domain.
1409 * We first add this information and the "bounds" to the sub_build->domain.
1410 * isl_ast_build_set_loop_bounds adds the stride information and
1411 * checks whether the current dimension attains
1412 * only a single value and whether this single value can be represented using
1413 * a single affine expression.
1414 * In the first case, the current level is considered "degenerate".
1415 * In the second, sub-case, the current level is considered "eliminated".
1416 * Eliminated levels don't need to be reflected in the AST since we can
1417 * simply plug in the affine expression. For degenerate, but non-eliminated,
1418 * levels, we do introduce a for node, but mark it as degenerate so that
1419 * it can be printed as an assignment of the single value to the loop
1420 * "iterator".
1421 *
1422 * If the current level is eliminated, we explicitly plug in the value
1423 * for the current level found by isl_ast_build_set_loop_bounds in the
1424 * inverse schedule. This ensures that if we are working on a slice
1425 * of the domain based on information available in the inverse schedule
1426 * and the build domain, that then this information is also reflected
1427 * in the inverse schedule. This operation also eliminates the current
1428 * dimension from the inverse schedule making sure no inner dimensions depend
1429 * on the current dimension. Otherwise, we create a for node, marking
1430 * it degenerate if appropriate. The initial for node is still incomplete
1431 * and will be completed in either refine_degenerate or refine_generic.
1432 *
1433 * We then generate a sequence of grafts for the next level,
1434 * create a surrounding graft for the current level and insert
1435 * the for node we created (if the current level is not eliminated).
1436 * Before creating a graft for the current level, we first extract
1437 * hoistable constraints from the child guards and combine them
1438 * with the pending constraints in the build. These constraints
1439 * are used to simplify the child guards and then added to the guard
1440 * of the current graft to ensure that they will be generated.
1441 * If the hoisted guard is a disjunction, then we use it directly
1442 * to gist the guards on the children before intersecting it with the
1443 * pending constraints. We do so because this disjunction is typically
1444 * identical to the guards on the children such that these guards
1445 * can be effectively removed completely. After the intersection,
1446 * the gist operation would have a harder time figuring this out.
1447 *
1448 * Finally, we set the bounds of the for loop in either
1449 * refine_degenerate or refine_generic.
1450 * We do so in a context where the pending constraints of the build
1451 * have been replaced by the guard of the current graft.
1452 */
1454 __isl_take isl_union_map *executed,
1457{
1458 isl_size depth;
1459 int degenerate;
1460 isl_bool eliminated;
1461 isl_size n;
1463 isl_basic_set *enforced;
1464 isl_set *guard, *hoisted;
1465 isl_ast_node *node = NULL;
1466 isl_ast_graft *graft;
1467 isl_ast_graft_list *children;
1468 isl_ast_build *sub_build;
1469 isl_ast_build *body_build;
1470
1474 bounds = isl_basic_set_intersect(bounds, hull);
1475 build = isl_ast_build_set_executed(build, isl_union_map_copy(executed));
1476
1477 depth = isl_ast_build_get_depth(build);
1478 if (depth < 0)
1479 build = isl_ast_build_free(build);
1480 sub_build = isl_ast_build_copy(build);
1481 bounds = isl_basic_set_remove_redundancies(bounds);
1482 bounds = isl_ast_build_specialize_basic_set(sub_build, bounds);
1483 sub_build = isl_ast_build_set_loop_bounds(sub_build,
1484 isl_basic_set_copy(bounds));
1485 degenerate = isl_ast_build_has_value(sub_build);
1486 eliminated = isl_ast_build_has_affine_value(sub_build, depth);
1487 if (degenerate < 0 || eliminated < 0)
1488 executed = isl_union_map_free(executed);
1489 if (!degenerate)
1490 bounds = isl_ast_build_compute_gist_basic_set(build, bounds);
1491 sub_build = isl_ast_build_set_pending_generated(sub_build,
1492 isl_basic_set_copy(bounds));
1493 if (eliminated)
1494 executed = plug_in_values(executed, sub_build);
1495 else
1496 node = create_for(build, degenerate);
1497
1498 body_build = isl_ast_build_copy(sub_build);
1499 body_build = isl_ast_build_increase_depth(body_build);
1500 if (!eliminated)
1501 node = before_each_for(node, body_build);
1502 children = generate_next_level(executed,
1503 isl_ast_build_copy(body_build));
1504
1505 enforced = extract_shared_enforced(children, build);
1506 guard = extract_pending(sub_build, enforced);
1507 hoisted = isl_ast_graft_list_extract_hoistable_guard(children, build);
1508 n = isl_set_n_basic_set(hoisted);
1509 if (n < 0)
1510 children = isl_ast_graft_list_free(children);
1511 if (n > 1)
1512 children = isl_ast_graft_list_gist_guards(children,
1513 isl_set_copy(hoisted));
1514 guard = isl_set_intersect(guard, hoisted);
1515 if (!eliminated)
1516 guard = add_implied_guards(guard, degenerate, bounds, build);
1517
1518 graft = isl_ast_graft_alloc_from_children(children,
1519 isl_set_copy(guard), enforced, build, sub_build);
1520
1521 if (!eliminated) {
1522 isl_ast_build *for_build;
1523
1524 graft = isl_ast_graft_insert_for(graft, node);
1525 for_build = isl_ast_build_copy(build);
1526 for_build = isl_ast_build_replace_pending_by_guard(for_build,
1527 isl_set_copy(guard));
1528 if (degenerate)
1529 graft = refine_degenerate(graft, for_build, sub_build);
1530 else
1531 graft = refine_generic(graft, bounds,
1532 domain, for_build);
1533 isl_ast_build_free(for_build);
1534 }
1535 isl_set_free(guard);
1536 if (!eliminated)
1537 graft = after_each_for(graft, body_build);
1538
1539 isl_ast_build_free(body_build);
1540 isl_ast_build_free(sub_build);
1541 isl_ast_build_free(build);
1542 isl_basic_set_free(bounds);
1544
1545 return graft;
1546}
1547
1548/* Internal data structure for checking if all constraints involving
1549 * the input dimension "depth" are such that the other coefficients
1550 * are multiples of "m", reducing "m" if they are not.
1551 * If "m" is reduced all the way down to "1", then the check has failed
1552 * and we break out of the iteration.
1553 */
1558
1559/* If constraint "c" involves the input dimension data->depth,
1560 * then make sure that all the other coefficients are multiples of data->m,
1561 * reducing data->m if needed.
1562 * Break out of the iteration if data->m has become equal to "1".
1563 */
1565 void *user)
1566{
1567 struct isl_check_scaled_data *data = user;
1568 int i, j;
1569 isl_size n;
1571 isl_dim_div };
1572
1573 if (!isl_constraint_involves_dims(c, isl_dim_in, data->depth, 1)) {
1575 return isl_stat_ok;
1576 }
1577
1578 for (i = 0; i < 4; ++i) {
1579 n = isl_constraint_dim(c, t[i]);
1580 if (n < 0)
1581 break;
1582 for (j = 0; j < n; ++j) {
1583 isl_val *d;
1584
1585 if (t[i] == isl_dim_in && j == data->depth)
1586 continue;
1587 if (!isl_constraint_involves_dims(c, t[i], j, 1))
1588 continue;
1590 data->m = isl_val_gcd(data->m, d);
1591 if (isl_val_is_one(data->m))
1592 break;
1593 }
1594 if (j < n)
1595 break;
1596 }
1597
1599
1600 return i < 4 ? isl_stat_error : isl_stat_ok;
1601}
1602
1603/* For each constraint of "bmap" that involves the input dimension data->depth,
1604 * make sure that all the other coefficients are multiples of data->m,
1605 * reducing data->m if needed.
1606 * Break out of the iteration if data->m has become equal to "1".
1607 */
1609 void *user)
1610{
1611 isl_stat r;
1612
1615 isl_basic_map_free(bmap);
1616
1617 return r;
1618}
1619
1620/* For each constraint of "map" that involves the input dimension data->depth,
1621 * make sure that all the other coefficients are multiples of data->m,
1622 * reducing data->m if needed.
1623 * Break out of the iteration if data->m has become equal to "1".
1624 */
1626{
1627 isl_stat r;
1628
1631
1632 return r;
1633}
1634
1635/* Create an AST node for the current dimension based on
1636 * the schedule domain "bounds" and return the node encapsulated
1637 * in an isl_ast_graft.
1638 *
1639 * "executed" is the current inverse schedule, taking into account
1640 * the bounds in "bounds"
1641 * "domain" is the domain of "executed", with inner dimensions projected out.
1642 *
1643 *
1644 * Before moving on to the actual AST node construction in create_node_scaled,
1645 * we first check if the current dimension is strided and if we can scale
1646 * down this stride. Note that we only do this if the ast_build_scale_strides
1647 * option is set.
1648 *
1649 * In particular, let the current dimension take on values
1650 *
1651 * f + s a
1652 *
1653 * with a an integer. We check if we can find an integer m that (obviously)
1654 * divides both f and s.
1655 *
1656 * If so, we check if the current dimension only appears in constraints
1657 * where the coefficients of the other variables are multiples of m.
1658 * We perform this extra check to avoid the risk of introducing
1659 * divisions by scaling down the current dimension.
1660 *
1661 * If so, we scale the current dimension down by a factor of m.
1662 * That is, we plug in
1663 *
1664 * i = m i' (1)
1665 *
1666 * Note that in principle we could always scale down strided loops
1667 * by plugging in
1668 *
1669 * i = f + s i'
1670 *
1671 * but this may result in i' taking on larger values than the original i,
1672 * due to the shift by "f".
1673 * By contrast, the scaling in (1) can only reduce the (absolute) value "i".
1674 */
1678{
1679 struct isl_check_scaled_data data;
1681 isl_ctx *ctx;
1682 isl_aff *offset;
1683 isl_val *d;
1684
1685 ctx = isl_ast_build_get_ctx(build);
1687 return create_node_scaled(executed, bounds, domain, build);
1688
1690 if (depth < 0)
1691 build = isl_ast_build_free(build);
1692 data.depth = depth;
1693 if (!isl_ast_build_has_stride(build))
1694 return create_node_scaled(executed, bounds, domain, build);
1695
1696 offset = isl_ast_build_get_offset(build, data.depth);
1697 data.m = isl_ast_build_get_stride(build, data.depth);
1698 if (!data.m)
1702 if (!d)
1703 executed = isl_union_map_free(executed);
1704
1705 if (executed && isl_val_is_divisible_by(data.m, d))
1706 data.m = isl_val_div(data.m, d);
1707 else {
1708 data.m = isl_val_set_si(data.m, 1);
1709 isl_val_free(d);
1710 }
1711
1712 if (!isl_val_is_one(data.m)) {
1714 &data) < 0 &&
1715 !isl_val_is_one(data.m))
1716 executed = isl_union_map_free(executed);
1717 }
1718
1719 if (!isl_val_is_one(data.m)) {
1720 isl_space *space;
1722 isl_aff *aff;
1723 isl_map *map;
1724 isl_union_map *umap;
1725
1726 space = isl_ast_build_get_space(build, 1);
1727 space = isl_space_map_from_set(space);
1728 ma = isl_multi_aff_identity(space);
1729 aff = isl_multi_aff_get_aff(ma, data.depth);
1731 ma = isl_multi_aff_set_aff(ma, data.depth, aff);
1732
1733 bounds = isl_basic_set_preimage_multi_aff(bounds,
1734 isl_multi_aff_copy(ma));
1736 isl_multi_aff_copy(ma));
1739 executed = isl_union_map_apply_domain(executed,
1740 isl_union_map_copy(umap));
1741 build = isl_ast_build_scale_down(build, isl_val_copy(data.m),
1742 umap);
1743 }
1745 isl_val_free(data.m);
1746
1747 return create_node_scaled(executed, bounds, domain, build);
1748}
1749
1750/* Add the basic set to the list that "user" points to.
1751 */
1753{
1754 isl_basic_set_list **list = user;
1755
1756 *list = isl_basic_set_list_add(*list, bset);
1757
1758 return isl_stat_ok;
1759}
1760
1761/* Extract the basic sets of "set" and collect them in an isl_basic_set_list.
1762 */
1765{
1766 isl_size n;
1767 isl_ctx *ctx;
1768 isl_basic_set_list *list;
1769
1771 if (n < 0)
1772 set = isl_set_free(set);
1773 if (!set)
1774 return NULL;
1775
1776 ctx = isl_set_get_ctx(set);
1777
1778 list = isl_basic_set_list_alloc(ctx, n);
1780 list = isl_basic_set_list_free(list);
1781
1783 return list;
1784}
1785
1786/* Generate code for the schedule domain "bounds"
1787 * and add the result to "list".
1788 *
1789 * We mainly detect strides here and check if the bounds do not
1790 * conflict with the current build domain
1791 * and then pass over control to create_node.
1792 *
1793 * "bounds" reflects the bounds on the current dimension and possibly
1794 * some extra conditions on outer dimensions.
1795 * It does not, however, include any divs involving the current dimension,
1796 * so it does not capture any stride constraints.
1797 * We therefore need to compute that part of the schedule domain that
1798 * intersects with "bounds" and derive the strides from the result.
1799 */
1800static __isl_give isl_ast_graft_list *add_node(
1801 __isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed,
1803{
1804 isl_ast_graft *graft;
1805 isl_set *domain = NULL;
1806 isl_union_set *uset;
1807 int empty, disjoint;
1808
1810 executed = isl_union_map_intersect_domain(executed, uset);
1811 empty = isl_union_map_is_empty(executed);
1812 if (empty < 0)
1813 goto error;
1814 if (empty)
1815 goto done;
1816
1817 uset = isl_union_map_domain(isl_union_map_copy(executed));
1820
1823 disjoint = isl_set_is_disjoint(domain, build->domain);
1824 if (disjoint < 0)
1825 goto error;
1826 if (disjoint)
1827 goto done;
1828
1830
1831 graft = create_node(executed, bounds, domain,
1832 isl_ast_build_copy(build));
1833 list = isl_ast_graft_list_add(list, graft);
1834 isl_ast_build_free(build);
1835 return list;
1836error:
1837 list = isl_ast_graft_list_free(list);
1838done:
1840 isl_basic_set_free(bounds);
1841 isl_union_map_free(executed);
1842 isl_ast_build_free(build);
1843 return list;
1844}
1845
1846/* Does any element of i follow or coincide with any element of j
1847 * at the current depth for equal values of the outer dimensions?
1848 */
1850 __isl_keep isl_basic_set *j, void *user)
1851{
1852 int depth = *(int *) user;
1854 isl_bool empty;
1855 int l;
1856
1859 for (l = 0; l < depth; ++l)
1861 isl_dim_out, l);
1866
1867 return isl_bool_not(empty);
1868}
1869
1870/* Split up each element of "list" into a part that is related to "bset"
1871 * according to "gt" and a part that is not.
1872 * Return a list that consist of "bset" and all the pieces.
1873 */
1877{
1878 int i;
1879 isl_size n;
1881
1882 n = isl_basic_set_list_n_basic_set(list);
1883 if (n < 0)
1884 bset = isl_basic_set_free(bset);
1885
1886 gt = isl_basic_map_copy(gt);
1888 res = isl_basic_set_list_from_basic_set(bset);
1889 for (i = 0; res && i < n; ++i) {
1890 isl_basic_set *bset;
1891 isl_set *set1, *set2;
1892 isl_basic_map *bmap;
1893 int empty;
1894
1895 bset = isl_basic_set_list_get_basic_set(list, i);
1896 bmap = isl_basic_map_copy(gt);
1897 bmap = isl_basic_map_intersect_range(bmap, bset);
1898 bset = isl_basic_map_range(bmap);
1899 empty = isl_basic_set_is_empty(bset);
1900 if (empty < 0)
1901 res = isl_basic_set_list_free(res);
1902 if (empty) {
1903 isl_basic_set_free(bset);
1904 bset = isl_basic_set_list_get_basic_set(list, i);
1905 res = isl_basic_set_list_add(res, bset);
1906 continue;
1907 }
1908
1909 res = isl_basic_set_list_add(res, isl_basic_set_copy(bset));
1911 bset = isl_basic_set_list_get_basic_set(list, i);
1915
1916 res = isl_basic_set_list_concat(res,
1918 }
1920 isl_basic_set_list_free(list);
1921 return res;
1922}
1923
1924static __isl_give isl_ast_graft_list *generate_sorted_domains(
1925 __isl_keep isl_basic_set_list *domain_list,
1926 __isl_keep isl_union_map *executed,
1927 __isl_keep isl_ast_build *build);
1928
1929/* Internal data structure for add_nodes.
1930 *
1931 * "executed" and "build" are extra arguments to be passed to add_node.
1932 * "list" collects the results.
1933 */
1940
1941/* Generate code for the schedule domains in "scc"
1942 * and add the results to "list".
1943 *
1944 * The domains in "scc" form a strongly connected component in the ordering.
1945 * If the number of domains in "scc" is larger than 1, then this means
1946 * that we cannot determine a valid ordering for the domains in the component.
1947 * This should be fairly rare because the individual domains
1948 * have been made disjoint first.
1949 * The problem is that the domains may be integrally disjoint but not
1950 * rationally disjoint. For example, we may have domains
1951 *
1952 * { [i,i] : 0 <= i <= 1 } and { [i,1-i] : 0 <= i <= 1 }
1953 *
1954 * These two domains have an empty intersection, but their rational
1955 * relaxations do intersect. It is impossible to order these domains
1956 * in the second dimension because the first should be ordered before
1957 * the second for outer dimension equal to 0, while it should be ordered
1958 * after for outer dimension equal to 1.
1959 *
1960 * This may happen in particular in case of unrolling since the domain
1961 * of each slice is replaced by its simple hull.
1962 *
1963 * For each basic set i in "scc" and for each of the following basic sets j,
1964 * we split off that part of the basic set i that shares the outer dimensions
1965 * with j and lies before j in the current dimension.
1966 * We collect all the pieces in a new list that replaces "scc".
1967 *
1968 * While the elements in "scc" should be disjoint, we double-check
1969 * this property to avoid running into an infinite recursion in case
1970 * they intersect due to some internal error.
1971 */
1973{
1974 struct isl_add_nodes_data *data = user;
1975 int i;
1976 isl_size depth;
1977 isl_size n;
1978 isl_basic_set *bset, *first;
1980 isl_space *space;
1981 isl_basic_map *gt;
1982
1983 n = isl_basic_set_list_n_basic_set(scc);
1984 if (n < 0)
1985 goto error;
1986 bset = isl_basic_set_list_get_basic_set(scc, 0);
1987 if (n == 1) {
1988 isl_basic_set_list_free(scc);
1989 data->list = add_node(data->list,
1990 isl_union_map_copy(data->executed), bset,
1991 isl_ast_build_copy(data->build));
1992 return data->list ? isl_stat_ok : isl_stat_error;
1993 }
1994
1995 depth = isl_ast_build_get_depth(data->build);
1996 if (depth < 0)
1997 bset = isl_basic_set_free(bset);
1998 space = isl_basic_set_get_space(bset);
1999 space = isl_space_map_from_set(space);
2000 gt = isl_basic_map_universe(space);
2001 for (i = 0; i < depth; ++i)
2003 gt = isl_basic_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);
2004
2005 first = isl_basic_set_copy(bset);
2006 list = isl_basic_set_list_from_basic_set(bset);
2007 for (i = 1; i < n; ++i) {
2008 int disjoint;
2009
2010 bset = isl_basic_set_list_get_basic_set(scc, i);
2011
2012 disjoint = isl_basic_set_is_disjoint(bset, first);
2013 if (disjoint < 0)
2014 list = isl_basic_set_list_free(list);
2015 else if (!disjoint)
2016 isl_die(isl_basic_set_list_get_ctx(scc),
2018 "basic sets in scc are assumed to be disjoint",
2019 list = isl_basic_set_list_free(list));
2020
2021 list = add_split_on(list, bset, gt);
2022 }
2023 isl_basic_set_free(first);
2025 isl_basic_set_list_free(scc);
2026 scc = list;
2027 data->list = isl_ast_graft_list_concat(data->list,
2028 generate_sorted_domains(scc, data->executed, data->build));
2029 isl_basic_set_list_free(scc);
2030
2031 return data->list ? isl_stat_ok : isl_stat_error;
2032error:
2033 isl_basic_set_list_free(scc);
2034 return isl_stat_error;
2035}
2036
2037/* Sort the domains in "domain_list" according to the execution order
2038 * at the current depth (for equal values of the outer dimensions),
2039 * generate code for each of them, collecting the results in a list.
2040 * If no code is generated (because the intersection of the inverse schedule
2041 * with the domains turns out to be empty), then an empty list is returned.
2042 *
2043 * The caller is responsible for ensuring that the basic sets in "domain_list"
2044 * are pair-wise disjoint. It can, however, in principle happen that
2045 * two basic sets should be ordered one way for one value of the outer
2046 * dimensions and the other way for some other value of the outer dimensions.
2047 * We therefore play safe and look for strongly connected components.
2048 * The function add_nodes takes care of handling non-trivial components.
2049 */
2050static __isl_give isl_ast_graft_list *generate_sorted_domains(
2051 __isl_keep isl_basic_set_list *domain_list,
2053{
2054 isl_ctx *ctx;
2055 struct isl_add_nodes_data data;
2056 isl_size depth;
2057 isl_size n;
2058
2059 n = isl_basic_set_list_n_basic_set(domain_list);
2060 if (n < 0)
2061 return NULL;
2062
2063 ctx = isl_basic_set_list_get_ctx(domain_list);
2064 data.list = isl_ast_graft_list_alloc(ctx, n);
2065 if (n == 0)
2066 return data.list;
2067 if (n == 1)
2069 isl_basic_set_list_get_basic_set(domain_list, 0),
2071
2073 data.executed = executed;
2074 data.build = build;
2075 if (depth < 0 || isl_basic_set_list_foreach_scc(domain_list,
2076 &domain_follows_at_depth, &depth,
2077 &add_nodes, &data) < 0)
2078 data.list = isl_ast_graft_list_free(data.list);
2079
2080 return data.list;
2081}
2082
2083/* Do i and j share any values for the outer dimensions?
2084 */
2086 __isl_keep isl_basic_set *j, void *user)
2087{
2088 int depth = *(int *) user;
2090 isl_bool empty;
2091 int l;
2092
2095 for (l = 0; l < depth; ++l)
2097 isl_dim_out, l);
2100
2101 return isl_bool_not(empty);
2102}
2103
2104/* Internal data structure for generate_sorted_domains_wrap.
2105 *
2106 * "n" is the total number of basic sets
2107 * "executed" and "build" are extra arguments to be passed
2108 * to generate_sorted_domains.
2109 *
2110 * "single" is set to 1 by generate_sorted_domains_wrap if there
2111 * is only a single component.
2112 * "list" collects the results.
2113 */
2122
2123/* Call generate_sorted_domains on "scc", fuse the result into a list
2124 * with either zero or one graft and collect these single element
2125 * lists into data->list.
2126 *
2127 * If there is only one component, i.e., if the number of basic sets
2128 * in the current component is equal to the total number of basic sets,
2129 * then data->single is set to 1 and the result of generate_sorted_domains
2130 * is not fused.
2131 */
2133 void *user)
2134{
2136 isl_ast_graft_list *list;
2137 isl_size n;
2138
2139 n = isl_basic_set_list_n_basic_set(scc);
2140 if (n < 0)
2141 scc = isl_basic_set_list_free(scc);
2142 list = generate_sorted_domains(scc, data->executed, data->build);
2143 data->single = n == data->n;
2144 if (!data->single)
2146 if (!data->list)
2147 data->list = list;
2148 else
2149 data->list = isl_ast_graft_list_concat(data->list, list);
2150
2151 isl_basic_set_list_free(scc);
2152 if (!data->list)
2153 return isl_stat_error;
2154
2155 return isl_stat_ok;
2156}
2157
2158/* Look for any (weakly connected) components in the "domain_list"
2159 * of domains that share some values of the outer dimensions.
2160 * That is, domains in different components do not share any values
2161 * of the outer dimensions. This means that these components
2162 * can be freely reordered.
2163 * Within each of the components, we sort the domains according
2164 * to the execution order at the current depth.
2165 *
2166 * If there is more than one component, then generate_sorted_domains_wrap
2167 * fuses the result of each call to generate_sorted_domains
2168 * into a list with either zero or one graft and collects these (at most)
2169 * single element lists into a bigger list. This means that the elements of the
2170 * final list can be freely reordered. In particular, we sort them
2171 * according to an arbitrary but fixed ordering to ease merging of
2172 * graft lists from different components.
2173 */
2174static __isl_give isl_ast_graft_list *generate_parallel_domains(
2175 __isl_keep isl_basic_set_list *domain_list,
2177{
2178 isl_size depth;
2180
2181 data.n = isl_basic_set_list_n_basic_set(domain_list);
2182 if (data.n < 0)
2183 return NULL;
2184
2185 if (data.n <= 1)
2186 return generate_sorted_domains(domain_list, executed, build);
2187
2189 if (depth < 0)
2190 return NULL;
2191 data.list = NULL;
2192 data.executed = executed;
2193 data.build = build;
2194 data.single = 0;
2195 if (isl_basic_set_list_foreach_scc(domain_list, &shared_outer, &depth,
2197 &data) < 0)
2198 data.list = isl_ast_graft_list_free(data.list);
2199
2200 if (!data.single)
2202
2203 return data.list;
2204}
2205
2206/* Internal data for separate_domain.
2207 *
2208 * "explicit" is set if we only want to use explicit bounds.
2209 *
2210 * "domain" collects the separated domains.
2211 */
2217
2218/* Extract implicit bounds on the current dimension for the executed "map".
2219 *
2220 * The domain of "map" may involve inner dimensions, so we
2221 * need to eliminate them.
2222 */
2225{
2226 isl_set *domain;
2227
2230
2231 return domain;
2232}
2233
2234/* Extract explicit bounds on the current dimension for the executed "map".
2235 *
2236 * Rather than eliminating the inner dimensions as in implicit_bounds,
2237 * we simply drop any constraints involving those inner dimensions.
2238 * The idea is that most bounds that are implied by constraints on the
2239 * inner dimensions will be enforced by for loops and not by explicit guards.
2240 * There is then no need to separate along those bounds.
2241 */
2244{
2245 isl_set *domain;
2246 isl_size depth;
2247 isl_size dim;
2248
2249 depth = isl_ast_build_get_depth(build);
2250 dim = isl_map_dim(map, isl_dim_out);
2251 if (depth < 0 || dim < 0)
2254
2259 isl_dim_set, depth + 1, dim - (depth + 1));
2261 isl_dim_set, depth, 1);
2263
2264 return domain;
2265}
2266
2267/* Split data->domain into pieces that intersect with the range of "map"
2268 * and pieces that do not intersect with the range of "map"
2269 * and then add that part of the range of "map" that does not intersect
2270 * with data->domain.
2271 */
2273{
2274 struct isl_separate_domain_data *data = user;
2275 isl_set *domain;
2276 isl_set *d1, *d2;
2277
2278 if (data->explicit)
2279 domain = explicit_bounds(map, data->build);
2280 else
2281 domain = implicit_bounds(map, data->build);
2282
2287 data->domain = isl_set_intersect(data->domain, domain);
2288 data->domain = isl_set_union(data->domain, d1);
2289 data->domain = isl_set_union(data->domain, d2);
2290
2291 return isl_stat_ok;
2292}
2293
2294/* Separate the schedule domains of "executed".
2295 *
2296 * That is, break up the domain of "executed" into basic sets,
2297 * such that for each basic set S, every element in S is associated with
2298 * the same domain spaces.
2299 *
2300 * "space" is the (single) domain space of "executed".
2301 */
2303 __isl_take isl_space *space, __isl_take isl_union_map *executed,
2305{
2306 struct isl_separate_domain_data data = { build };
2307 isl_ctx *ctx;
2308
 /* NOTE(review): lines 2309-2311 (reading the ast_build_separation_bounds
  * option into data and fetching ctx) are elided in this rendering. */
2312 data.domain = isl_set_empty(space);
 /* Accumulate the separated domain over all maps of "executed";
  * on failure, free the partial result so NULL is returned. */
2313 if (isl_union_map_foreach_map(executed, &separate_domain, &data) < 0)
2314 data.domain = isl_set_free(data.domain);
2315
2316 isl_union_map_free(executed);
2317 return data.domain;
2318}
2319
2320/* Temporary data used during the search for a lower bound for unrolling.
2321 *
2322 * "build" is the build in which the unrolling will be performed
2323 * "domain" is the original set for which to find a lower bound
2324 * "depth" is the dimension for which to find a lower bound
2325 * "expansion" is the expansion that needs to be applied to "domain"
2326 * in the unrolling that will be performed
2327 *
2328 * "lower" is the best lower bound found so far. It is NULL if we have not
2329 * found any yet.
2330 * "n" is the corresponding size. If lower is NULL, then the value of n
2331 * is undefined.
2332 * "n_div" is the maximal number of integer divisions in the first
2333 * unrolled iteration (after expansion). It is set to -1 if it hasn't
2334 * been computed yet.
2335 */
2346
2347/* Return the constraint
2348 *
2349 * i_"depth" = aff + offset
2350 */
2359
2360/* Update *user to the number of integer divisions in the first element
2361 * of "ma", if it is larger than the current value.
2362 */
2365{
2366 isl_aff *aff;
2367 int *n = user;
2368 isl_size n_div;
2369
 /* Only the first element of "ma" is inspected (see header comment). */
2370 aff = isl_multi_aff_get_aff(ma, 0);
2371 n_div = isl_aff_dim(aff, isl_dim_div);
2373 isl_multi_aff_free(ma);
2375
2376 if (n_div > *n)
2377 *n = n_div;
2378
 /* A negative n_div signals an earlier error; propagate it. */
2379 return n_div >= 0 ? isl_stat_ok : isl_stat_error;
2380}
2381
2382/* Get the number of integer divisions in the expression for the iterator
2383 * value at the first slice in the unrolling based on lower bound "lower",
2384 * taking into account the expansion that needs to be performed on this slice.
2385 */
2387 __isl_keep isl_aff *lower)
2388{
2389 isl_constraint *c;
2390 isl_set *set;
2391 isl_map *it_map, *expansion;
2393 int n;
2394
 /* Slice the domain at the first unrolled iteration (offset 0),
  * apply the expansion, and map the result to the iterator value. */
2395 c = at_offset(data->depth, lower, 0);
2396 set = isl_set_copy(data->domain);
2399 set = isl_set_apply(set, expansion);
2400 it_map = isl_ast_build_map_to_iterator(data->build, set);
 /* NOTE(review): the foreach call that feeds update_n_div and sets "n"
  * (lines 2401-2405) is elided in this rendering; n = -1 is the
  * error value, as in the callers. */
2402 n = 0;
2404 n = -1;
2406
2407 return n;
2408}
2409
2410/* Is the lower bound "lower" with corresponding iteration count "n"
2411 * better than the one stored in "data"?
2412 * If there is no upper bound on the iteration count ("n" is infinity) or
2413 * if the count is too large, then we cannot use this lower bound.
2414 * Otherwise, if there was no previous lower bound or
2415 * if the iteration count of the new lower bound is smaller than
2416 * the iteration count of the previous lower bound, then we consider
2417 * the new lower bound to be better.
2418 * If the iteration count is the same, then compare the number
2419 * of integer divisions that would be needed to express
2420 * the iterator value at the first slice in the unrolling
2421 * according to the lower bound. If we end up computing this
2422 * number, then store the lowest value in data->n_div.
2423 */
2426{
2427 int cmp;
2428 int n_div;
2429
 /* No candidate count at all: error. */
2430 if (!n)
2431 return -1;
 /* Reject unbounded or too-large iteration counts (cannot unroll). */
2432 if (isl_val_is_infty(n))
2433 return 0;
2434 if (isl_val_cmp_si(n, INT_MAX) > 0)
2435 return 0;
 /* First usable candidate is always an improvement. */
2436 if (!data->lower)
2437 return 1;
 /* Smaller iteration count wins outright. */
2438 cmp = isl_val_cmp_si(n, *data->n);
2439 if (cmp < 0)
2440 return 1;
2441 if (cmp > 0)
2442 return 0;
 /* Tie on the count: compare integer divisions in the first slice,
  * lazily computing (and caching) the value for the stored bound. */
2443 if (data->n_div < 0)
2444 data->n_div = get_expanded_n_div(data, data->lower);
2445 if (data->n_div < 0)
2446 return -1;
2447 if (data->n_div == 0)
2448 return 0;
2449 n_div = get_expanded_n_div(data, lower);
2450 if (n_div < 0)
2451 return -1;
2452 if (n_div >= data->n_div)
2453 return 0;
2454 data->n_div = n_div;
2455
2456 return 1;
2457}
2458
2459/* Check if we can use "c" as a lower bound and if it is better than
2460 * any previously found lower bound.
2461 *
2462 * If "c" does not involve the dimension at the current depth,
2463 * then we cannot use it.
2464 * Otherwise, let "c" be of the form
2465 *
2466 * i >= f(j)/a
2467 *
2468 * We compute the maximal value of
2469 *
2470 * -ceil(f(j)/a)) + i + 1
2471 *
2472 * over the domain. If there is such a value "n", then we know
2473 *
2474 * -ceil(f(j)/a)) + i + 1 <= n
2475 *
2476 * or
2477 *
2478 * i < ceil(f(j)/a)) + n
2479 *
2480 * meaning that we can use ceil(f(j)/a)) as a lower bound for unrolling.
2481 * We just need to check if we have found any lower bound before and
2482 * if the new lower bound is better (smaller n or fewer integer divisions)
2483 * than the previously found lower bounds.
2484 */
2487{
2488 isl_aff *aff, *lower;
2489 isl_val *max;
2490 int better;
2491
 /* NOTE(review): line 2492 (the check that "c" is a lower bound
  * involving the dimension at data->depth) is elided here. */
2493 return isl_stat_ok;
2494
2495 lower = isl_constraint_get_bound(c, isl_dim_set, data->depth);
2496 lower = isl_aff_ceil(lower);
 /* Build -ceil(f(j)/a) + i + 1 and maximize it over the domain to
  * obtain the candidate iteration count "n" (see header comment). */
2497 aff = isl_aff_copy(lower);
2498 aff = isl_aff_neg(aff);
2501 max = isl_set_max_val(data->domain, aff);
2503
2504 better = is_better_lower_bound(data, lower, max);
 /* Not better (or error): discard the candidate. */
2505 if (better < 0 || !better) {
2506 isl_val_free(max);
2507 isl_aff_free(lower);
2508 return better < 0 ? isl_stat_error : isl_stat_ok;
2509 }
2510
 /* Record the new best lower bound and its iteration count. */
2511 isl_aff_free(data->lower);
2512 data->lower = lower;
2513 *data->n = isl_val_get_num_si(max);
2514 isl_val_free(max);
2515
2516 return isl_stat_ok;
2517}
2518
2519/* Check if we can use "c" as a lower bound and if it is better than
2520 * any previously found lower bound.
2521 */
2523{
2524 struct isl_find_unroll_data *data;
2525 isl_stat r;
2526
 /* Thin isl_constraint callback: forward to the real worker and
  * release the constraint (the free on line 2529 is elided here). */
2527 data = (struct isl_find_unroll_data *) user;
2528 r = update_unrolling_lower_bound(data, c);
2530
2531 return r;
2532}
2533
2534/* Look for a lower bound l(i) on the dimension at "depth"
2535 * and a size n such that "domain" is a subset of
2536 *
2537 * { [i] : l(i) <= i_d < l(i) + n }
2538 *
2539 * where d is "depth" and l(i) depends only on earlier dimensions.
2540 * Furthermore, try and find a lower bound such that n is as small as possible.
2541 * In particular, "n" needs to be finite.
2542 * "build" is the build in which the unrolling will be performed.
2543 * "expansion" is the expansion that needs to be applied to "domain"
2544 * in the unrolling that will be performed.
2545 *
2546 * Inner dimensions have been eliminated from "domain" by the caller.
2547 *
2548 * We first construct a collection of lower bounds on the input set
2549 * by computing its simple hull. We then iterate through them,
2550 * discarding those that we cannot use (either because they do not
2551 * involve the dimension at "depth" or because they have no corresponding
2552 * upper bound, meaning that "n" would be unbounded) and pick out the
2553 * best from the remaining ones.
2554 *
2555 * If we cannot find a suitable lower bound, then we consider that
2556 * to be an error.
2557 */
2561{
2562 struct isl_find_unroll_data data =
2563 { build, domain, depth, expansion, NULL, n, -1 };
 /* NOTE(review): lines 2564-2568 and 2572 (computing the simple hull
  * "hull" and starting the constraint iteration over it) are elided
  * in this rendering. */
2565
2567
2569 &constraint_find_unroll, &data) < 0)
2570 goto error;
2571
2573
 /* Per the header comment, failing to find any usable lower bound
  * is reported as an error. */
2574 if (!data.lower)
2576 "cannot find lower bound for unrolling", return NULL);
2577
2578 return data.lower;
2579error:
2581 return isl_aff_free(data.lower);
2582}
2583
2584/* Call "fn" on each iteration of the current dimension of "domain".
2585 * If "init" is not NULL, then it is called with the number of
2586 * iterations before any call to "fn".
2587 * Return -1 on failure.
2588 *
2589 * Since we are going to be iterating over the individual values,
2590 * we first check if there are any strides on the current dimension.
2591 * If there is, we rewrite the current dimension i as
2592 *
2593 * i = stride i' + offset
2594 *
2595 * and then iterate over individual values of i' instead.
2596 *
2597 * We then look for a lower bound on i' and a size such that the domain
2598 * is a subset of
2599 *
2600 * { [j,i'] : l(j) <= i' < l(j) + n }
2601 *
2602 * and then take slices of the domain at values of i'
2603 * between l(j) and l(j) + n - 1.
2604 *
2605 * We compute the unshifted simple hull of each slice to ensure that
2606 * we have a single basic set per offset. The slicing constraint
2607 * may get simplified away before the unshifted simple hull is taken
2608 * and may therefore in some rare cases disappear from the result.
2609 * We therefore explicitly add the constraint back after computing
2610 * the unshifted simple hull to ensure that the basic sets
2611 * remain disjoint. The constraints that are dropped by taking the hull
2612 * will be taken into account at the next level, as in the case of the
2613 * atomic option.
2614 *
2615 * Finally, we map i' back to i and call "fn".
2616 */
2618 __isl_keep isl_ast_build *build, int (*init)(int n, void *user),
2619 int (*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
2620{
2621 int i, n;
2622 isl_bool empty;
2625 isl_basic_map *bmap;
2626 isl_aff *lower = NULL;
2627 isl_ast_build *stride_build;
2628
 /* NOTE(review): several interior lines are elided in this rendering
  * (e.g. 2623-2624, 2629-2634, 2637-2642, 2645-2646), including the
  * depth lookup and the stride-based rewriting i = stride i' + offset
  * described in the header comment. */
2630 if (depth < 0)
2632
2635 stride_build = isl_ast_build_copy(build);
2636 stride_build = isl_ast_build_detect_strides(stride_build,
2639
2641 isl_multi_aff_copy(expansion));
2643 isl_ast_build_free(stride_build);
2644
2646
 /* Determine the number of iterations n; n = -1 encodes an error. */
2647 empty = isl_set_is_empty(domain);
2648 if (empty < 0) {
2649 n = -1;
2650 } else if (empty) {
2651 n = 0;
2652 } else {
2654 if (!lower)
2655 n = -1;
2656 }
 /* Give "init" a chance to set up per-count state before iterating. */
2657 if (n >= 0 && init && init(n, user) < 0)
2658 n = -1;
2659 for (i = 0; i < n; ++i) {
2660 isl_set *set;
2661 isl_basic_set *bset;
2662 isl_constraint *slice;
2663
 /* Slice at i' = lower + i; re-add the slicing constraint after
  * the hull (elided lines 2665-2667) and map i' back to i. */
2664 slice = at_offset(depth, lower, i);
2668 bset = isl_basic_set_add_constraint(bset, slice);
2669 bset = isl_basic_set_apply(bset, isl_basic_map_copy(bmap));
2670
2671 if (fn(bset, user) < 0)
2672 break;
2673 }
2674
2677 isl_basic_map_free(bmap);
2678
 /* Failure if counting failed or the callback aborted the loop. */
2679 return n < 0 || i < n ? -1 : 0;
2680}
2681
2682/* Data structure for storing the results and the intermediate objects
2683 * of compute_domains.
2684 *
2685 * "list" is the main result of the function and contains a list
2686 * of disjoint basic sets for which code should be generated.
2687 *
2688 * "executed" and "build" are inputs to compute_domains.
2689 * "schedule_domain" is the domain of "executed".
2690 *
2691 * "option" contains the domains at the current depth that should be
2692 * atomic, separated or unrolled. These domains are as specified by
2693 * the user, except that inner dimensions have been eliminated and
2694 * that they have been made pair-wise disjoint.
2695 *
2696 * "sep_class" contains the user-specified split into separation classes
2697 * specialized to the current depth.
2698 * "done" contains the union of the separation domains that have already
2699 * been handled.
2700 */
2713
2714/* Internal data structure for do_unroll.
2715 *
2716 * "domains" stores the results of compute_domains.
2717 * "class_domain" is the original class domain passed to do_unroll.
2718 * "unroll_domain" collects the unrolled iterations.
2719 */
2725
2726/* Given an iteration of an unrolled domain represented by "bset",
2727 * add it to data->domains->list.
2728 * Since we may have dropped some constraints, we intersect with
2729 * the class domain again to ensure that each element in the list
2730 * is disjoint from the other class domains.
2731 */
2733{
2734 struct isl_ast_unroll_data *data = user;
2735 isl_set *set;
2736 isl_basic_set_list *list;
2737
 /* NOTE(review): lines 2738-2739 and 2741-2743 are elided here; they
  * convert "bset" to a set, accumulate it into data->unroll_domain and
  * intersect with the class domain (see header comment). */
2740 isl_set_copy(set));
2744 data->domains->list = isl_basic_set_list_concat(data->domains->list,
2745 list);
2746
2747 return 0;
2748}
2749
2750/* Extend domains->list with a list of basic sets, one for each value
2751 * of the current dimension in "domain" and remove the corresponding
2752 * sets from the class domain. Return the updated class domain.
2753 * The divs that involve the current dimension have not been projected out
2754 * from this domain.
2755 *
2756 * We call foreach_iteration to iterate over the individual values and
2757 * in do_unroll_iteration we collect the individual basic sets in
2758 * domains->list and their union in data->unroll_domain, which is then
2759 * used to update the class domain.
2760 */
2763{
2764 struct isl_ast_unroll_data data;
2765
 /* Propagate NULL inputs while freeing the other argument. */
2766 if (!domain)
2767 return isl_set_free(class_domain);
2768 if (!class_domain)
2769 return isl_set_free(domain);
2770
2771 data.domains = domains;
 /* NOTE(review): lines 2772-2775, 2777 and 2779 are elided in this
  * rendering; they initialize data.class_domain/unroll_domain, invoke
  * foreach_iteration with do_unroll_iteration, and on failure free the
  * class domain before subtracting the unrolled iterations from it. */
2774
2776 &do_unroll_iteration, &data) < 0)
2778
2780
2781 return class_domain;
2782}
2783
2784/* Add domains to domains->list for each individual value of the current
2785 * dimension, for that part of the schedule domain that lies in the
2786 * intersection of the option domain and the class domain.
2787 * Remove the corresponding sets from the class domain and
2788 * return the updated class domain.
2789 *
2790 * We first break up the unroll option domain into individual pieces
2791 * and then handle each of them separately. The unroll option domain
2792 * has been made disjoint in compute_domains_init_options,
2793 *
2794 * Note that we actively want to combine different pieces of the
2795 * schedule domain that have the same value at the current dimension.
2796 * We therefore need to break up the unroll option domain before
2797 * intersecting with class and schedule domain, hoping that the
2798 * unroll option domain specified by the user is relatively simple.
2799 */
2802{
2804 isl_basic_set_list *unroll_list;
2805 int i;
2806 isl_size n;
2807 isl_bool empty;
2808
 /* Nothing to do if the unroll option domain is empty
  * (the emptiness test itself is on elided line 2809). */
2810 if (empty < 0)
2811 return isl_set_free(class_domain);
2812 if (empty)
2813 return class_domain;
2814
 /* NOTE(review): lines 2815-2817 and 2820 are elided: they break the
  * unroll option domain into a basic-set list and handle n < 0. */
2817
2818 n = isl_basic_set_list_n_basic_set(unroll_list);
2819 if (n < 0)
2821 for (i = 0; i < n; ++i) {
2822 isl_basic_set *bset;
2823
2824 bset = isl_basic_set_list_get_basic_set(unroll_list, i);
 /* Elided lines 2825-2831 intersect with class and schedule
  * domains and test for emptiness before unrolling the piece. */
2830
2832 if (empty >= 0 && empty) {
2834 continue;
2835 }
2836
2838 }
2839
2840 isl_basic_set_list_free(unroll_list);
2841
2842 return class_domain;
2843}
2844
2845/* Try and construct a single basic set that includes the intersection of
2846 * the schedule domain, the atomic option domain and the class domain.
2847 * Add the resulting basic set(s) to domains->list and remove them
2848 * from class_domain. Return the updated class domain.
2849 *
2850 * We construct a single domain rather than trying to combine
2851 * the schedule domains of individual domains because we are working
2852 * within a single component so that non-overlapping schedule domains
2853 * should already have been separated.
2854 * We do however need to make sure that this single domain is a subset
2855 * of the class domain so that it would not intersect with any other
2856 * class domains. This means that we may end up splitting up the atomic
2857 * domain in case separation classes are being used.
2858 *
2859 * "domain" is the intersection of the schedule domain and the class domain,
2860 * with inner dimensions projected out.
2861 */
2895
2896/* Split up the schedule domain into uniform basic sets,
2897 * in the sense that each element in a basic set is associated to
2898 * elements of the same domains, and add the result to domains->list.
2899 * Do this for that part of the schedule domain that lies in the
2900 * intersection of "class_domain" and the separate option domain.
2901 *
2902 * "class_domain" may or may not include the constraints
2903 * of the schedule domain, but this does not make a difference
2904 * since we are going to intersect it with the domain of the inverse schedule.
2905 * If it includes schedule domain constraints, then they may involve
2906 * inner dimensions, but we will eliminate them in separation_domain.
2907 */
2910{
2911 isl_space *space;
2912 isl_set *domain;
2913 isl_union_map *executed;
2914 isl_basic_set_list *list;
2915 int empty;
2916
 /* Restrict the inverse schedule to the part of the schedule domain
  * selected for separation (the separate-option lookup on lines
  * 2917-2918 and the domain argument on line 2921 are elided). */
2919 executed = isl_union_map_copy(domains->executed);
2920 executed = isl_union_map_intersect_domain(executed,
2922 empty = isl_union_map_is_empty(executed);
2923 if (empty < 0 || empty) {
2924 isl_union_map_free(executed);
2925 return empty < 0 ? -1 : 0;
2926 }
2927
2929 domain = separate_schedule_domains(space, executed, domains->build);
2930
 /* Append the separated pieces to the result list
  * (list construction on line 2931 is elided). */
2932 domains->list = isl_basic_set_list_concat(domains->list, list);
2933
2934 return 0;
2935}
2936
2937/* Internal data structure for split_off_fixed_non_strided.
2938 *
2939 * "build" is a (possibly modified) copy of the build.
2940 * "core" is part of the domain that is not split off.
2941 * "fixed" collects basic sets that have a fixed value at the current depth
2942 * and that may be split off.
2943 * "split" collects basic sets that are split off.
2944 * "stride" describes stride constraints satisfied by "core"
2945 * (along with additional constraints on the outer dimensions).
2946 */
2954
2955/* Initialize "split" for a domain with the given space.
2956 */
2959{
 /* All three set fields start out empty in the same space;
  * "stride" is filled in later only if stride constraints are found. */
2960 split->build = isl_ast_build_copy(build);
2961 split->core = isl_set_empty(space);
2962 split->fixed = isl_set_copy(split->core);
2963 split->split = isl_set_copy(split->core);
2964 split->stride = NULL;
2965}
2966
2967/* Free all memory allocated for "split".
2968 */
2970{
 /* Release every field; the isl_*_free calls return NULL, so the
  * fields are also reset, making repeated clearing safe. */
2971 split->build = isl_ast_build_free(split->build);
2972 split->core = isl_set_free(split->core);
2973 split->fixed = isl_set_free(split->fixed);
2974 split->split = isl_set_free(split->split);
2975 split->stride = isl_set_free(split->stride);
2976}
2977
2978/* Add "bset" to either split->core or split->fixed depending
2979 * on whether it has an obviously fixed value at the current depth.
2980 *
2981 * Only accept values that are directly defined by an equality constraint.
2982 * Note that isl_map_plain_is_single_valued also considers cases
2983 * of an equality constraint involving an integer division defined
2984 * in terms of the current dimension.
2985 * To prevent such cases from triggering, those integer divisions
2986 * are first eliminated.
2987 */
2989{
2991 isl_set *set;
2992 isl_map *map;
2993 isl_bool sv;
2994
 /* NOTE(review): lines 2995-2999 are elided; per the header comment
  * they eliminate divs defined in terms of the current dimension and
  * compute "sv" via a plain single-valuedness test. */
3000
 /* Classify: obviously fixed value -> split->fixed, else -> split->core
  * (the error-handling branch body on line 3002 is elided). */
3001 if (sv < 0)
3003 else if (sv)
3004 split->fixed = isl_set_union(split->fixed, set);
3005 else
3006 split->core = isl_set_union(split->core, set);
3007
3009}
3010
3011/* Add "bset" to either split->core or split->split depending
3012 * on whether it satisfies the stride constraints.
3013 */
3015{
3017 isl_set *set;
3019
 /* A basic set satisfying the stride constraints joins the strided
  * core; otherwise it is split off (elided lines include the "match"
  * declaration and the error branch body). */
3021 match = isl_set_is_subset(set, split->stride);
3022
3023 if (match < 0)
3025 else if (match)
3026 split->core = isl_set_union(split->core, set);
3027 else
3028 split->split = isl_set_union(split->split, set);
3029
3031}
3032
3033/* Is part of "domain" strided while the remainder has a fixed value
3034 * for the current dimension?
3035 *
3036 * First, split off the basic sets of "domain" that have a fixed value, and
3037 * check if the remainder satisfies some stride constraints.
3038 *
3039 * If so, see if any of the basic sets with a fixed value also
3040 * satisfy these stride constraints. If so, add them to the core
3041 * strided domain.
3042 * Also include the shared constraints on the outer dimensions
3043 * to these constraints so that a basic set with a fixed value
3044 * that happens to be equal to one of the possible values in the strided domain,
3045 * but does not otherwise fit in the strided domain, is not added
3046 * to this domain.
3047 *
3048 * The remaining basic sets are collected in split->split.
3049 * If there are any left, then return isl_bool_true.
3050 */
3053{
3054 isl_size n1, n2, n3;
3055 isl_bool has_stride;
3056 isl_basic_set *outer;
3057 isl_map *map;
3058
 /* NOTE(review): lines 3059, 3069, 3077-3084 are elided in this
  * rendering (initial classification, stride detection setup and the
  * construction of split->stride from the stride constraints plus the
  * shared outer constraints described in the header comment). */
3060 return isl_bool_error;
3061
 /* A split needs both fixed-value pieces and a non-empty remainder. */
3062 n1 = isl_set_n_basic_set(split->fixed);
3063 n2 = isl_set_n_basic_set(split->core);
3064 if (n1 < 0 || n2 < 0)
3065 return isl_bool_error;
3066 if (n1 == 0 || n2 == 0)
3067 return isl_bool_false;
3068
3070 isl_set_copy(split->core));
3071 has_stride = isl_ast_build_has_stride(split->build);
3072 if (has_stride < 0)
3073 return isl_bool_error;
3074 if (!has_stride)
3075 return isl_bool_false;
3076
3079 isl_set_copy(split->core));
3081 split->stride =
3083
 /* Move fixed-value pieces that satisfy the stride constraints into
  * the core; the rest are collected in split->split. */
3085 &split_not_strided, split) < 0)
3086 return isl_bool_error;
3087
3088 n3 = isl_set_n_basic_set(split->split);
3089 if (n3 < 0)
3090 return isl_bool_error;
3091 return isl_bool_ok(n3 > 0);
3092}
3093
3094/* If part of "domain" is strided and the remainder has
3095 * a fixed value for the current dimension, then subtract
3096 * the second part from the first.
3097 *
3098 * There need to be at least two basic sets for one to be strided and
3099 * one to have a fixed value.
3100 * Furthermore, some local variables need to be involved
3101 * for some basic sets to be strided.
3102 *
3103 * If this is the case, initialize the isl_split_fixed_data data structure,
3104 * check whether a split is needed, and, if so, perform the split.
3105 */
3108{
3109 isl_bool locals, need;
3110 isl_size n;
3112
 /* Cheap early exits: need at least two basic sets, and some local
  * variables must be involved for stride constraints to exist
  * (the computations of "n" and "locals" are on elided lines). */
3114 if (n < 0)
3115 return isl_set_free(domain);
3116 if (n <= 1)
3117 return domain;
3119 if (locals < 0)
3120 return isl_set_free(domain);
3121 if (!locals)
3122 return domain;
3123
 /* NOTE(review): lines 3124-3126, 3129, 3131, 3133-3134 and 3137 are
  * elided; they initialize the split data, call the need-split check,
  * and on success subtract the fixed-value part from the strided core
  * before clearing the data (see header comment). */
3125
3127
3128 if (need < 0) {
3130 } else if (need) {
3132 domain = isl_set_copy(split.core);
3135 }
3136
3138 return domain;
3139}
3140
3141/* Eliminate dimensions inner to the current dimension as well as
3142 * existentially quantified variables and local variables
3143 * that depend on the current dimension.
3144 * The result then consists only of constraints that are independent
3145 * of the current dimension and upper and lower bounds on the current
3146 * dimension.
3147 * Do this as a preparation for splitting up the domain into disjoint
3148 * basic sets.
3149 *
3150 * If some parts of the domain satisfy some stride constraints,
3151 * while the other parts have a fixed value for the current dimension,
3152 * then first subtract this second part from the first.
3153 * This is especially useful when the second part overlaps with
3154 * the start or the end of the strided domain.
3155 * In such cases it is usually better to generate code for this fixed value
3156 * separately. Explicitly subtracting it here avoids situations
3157 * where the first part is subtracted from the second, leading to
3158 * more complicated code inside the (larger) strided domain.
3159 */
3169
3170/* Split up the domain at the current depth into disjoint
3171 * basic sets for which code should be generated separately
3172 * for the given separation class domain.
3173 *
3174 * If any separation classes have been defined, then "class_domain"
3175 * is the domain of the current class and does not refer to inner dimensions.
3176 * Otherwise, "class_domain" is the universe domain.
3177 *
3178 * We first make sure that the class domain is disjoint from
3179 * previously considered class domains.
3180 *
3181 * The separate domains can be computed directly from the "class_domain".
3182 *
3183 * The unroll, atomic and remainder domains need the constraints
3184 * from the schedule domain.
3185 *
3186 * For unrolling, the actual schedule domain is needed (with divs that
3187 * may refer to the current dimension) so that stride detection can be
3188 * performed.
3189 *
3190 * For atomic and remainder domains, inner dimensions and divs involving
3191 * the current dimensions should be eliminated.
3192 * In case we are working within a separation class, we need to intersect
3193 * the result with the current "class_domain" to ensure that the domains
3194 * are disjoint from those generated from other class domains.
3195 *
3196 * The domain that has been made atomic may be larger than specified
3197 * by the user since it needs to be representable as a single basic set.
3198 * This possibly larger domain is removed from class_domain by
3199 * compute_atomic_domain. It is computed first so that the extended domain
3200 * would not overlap with any domains computed before.
3201 * Similarly, the unrolled domains may have some constraints removed and
3202 * may therefore also be larger than specified by the user.
3203 *
3204 * If anything is left after handling separate, unroll and atomic,
3205 * we split it up into basic sets and append the basic sets to domains->list.
3206 */
3208 __isl_take isl_set *class_domain)
3209{
3210 isl_basic_set_list *list;
3211 isl_set *domain;
3212
 /* Make this class domain disjoint from those handled before, and
  * record it as handled. */
3213 class_domain = isl_set_subtract(class_domain,
3214 isl_set_copy(domains->done));
3215 domains->done = isl_set_union(domains->done,
3216 isl_set_copy(class_domain));
3217
 /* Atomic first, then unroll: both may enlarge beyond the user's
  * specification and remove their part from class_domain (see header). */
3218 class_domain = compute_atomic_domain(domains, class_domain);
3219 class_domain = compute_unroll_domains(domains, class_domain);
3220
3221 domain = isl_set_copy(class_domain);
3222
3223 if (compute_separate_domain(domains, domain) < 0)
3224 goto error;
 /* NOTE(review): lines 3225-3237 are partly elided: they subtract the
  * separate-option domain, intersect with the schedule domain and the
  * class domain, and split the remainder into disjoint basic sets. */
3227
3229 isl_set_copy(domains->schedule_domain));
3230
3232 domain = isl_set_intersect(domain, isl_set_copy(class_domain));
3233
3236
3238 domains->list = isl_basic_set_list_concat(domains->list, list);
3239
3240 isl_set_free(class_domain);
3241
3242 return isl_stat_ok;
3243error:
3245 isl_set_free(class_domain);
3246 return isl_stat_error;
3247}
3248
3249/* Split up the domain at the current depth into disjoint
3250 * basic sets for which code should be generated separately
3251 * for the separation class identified by "pnt".
3252 *
3253 * We extract the corresponding class domain from domains->sep_class,
3254 * eliminate inner dimensions and pass control to compute_partial_domains.
3255 */
3257{
3258 struct isl_codegen_domains *domains = user;
3259 isl_set *class_set;
3260 isl_set *domain;
3261 int disjoint;
3262
 /* Extract the domain of the separation class identified by "pnt"
  * (parts of the extraction and the inner-dimension elimination on
  * lines 3264, 3266-3267 are elided in this rendering). */
3263 class_set = isl_set_from_point(pnt);
3265 isl_map_copy(domains->sep_class), class_set));
3268
 /* Skip classes that cannot intersect the schedule domain
  * (the free of "domain" on line 3273 is elided). */
3269 disjoint = isl_set_plain_is_disjoint(domain, domains->schedule_domain);
3270 if (disjoint < 0)
3271 return isl_stat_error;
3272 if (disjoint) {
3274 return isl_stat_ok;
3275 }
3276
3277 return compute_partial_domains(domains, domain);
3278}
3279
3280/* Extract the domains at the current depth that should be atomic,
3281 * separated or unrolled and store them in option.
3282 *
3283 * The domains specified by the user might overlap, so we make
3284 * them disjoint by subtracting earlier domains from later domains.
3285 */
3288{
3289 enum isl_ast_loop_type type, type2;
3290 isl_set *unroll;
3291
 /* Make the per-type option domains pair-wise disjoint by subtracting
  * earlier domains from later ones (the enclosing loop header and the
  * option extraction on elided lines 3292-3294, 3296 set up "option"). */
3295 for (type2 = isl_ast_loop_atomic; type2 < type; ++type2)
3297 isl_set_copy(option[type2]));
3298 }
3299
 /* The unroll domain is additionally broken into disjoint pieces. */
3300 unroll = option[isl_ast_loop_unroll];
3301 unroll = isl_set_coalesce(unroll);
3302 unroll = isl_set_make_disjoint(unroll);
3303 option[isl_ast_loop_unroll] = unroll;
3304}
3305
3306/* Split up the domain at the current depth into disjoint
3307 * basic sets for which code should be generated separately,
3308 * based on the user-specified options.
3309 * Return the list of disjoint basic sets.
3310 *
3311 * There are three kinds of domains that we need to keep track of.
3312 * - the "schedule domain" is the domain of "executed"
3313 * - the "class domain" is the domain corresponding to the current
3314 * separation class
3315 * - the "option domain" is the domain corresponding to one of the options
3316 * atomic, unroll or separate
3317 *
3318 * We first consider the individual values of the separation classes
3319 * and split up the domain for each of them separately.
3320 * Finally, we consider the remainder. If no separation classes were
3321 * specified, then we call compute_partial_domains with the universe
3322 * "class_domain". Otherwise, we take the "schedule_domain" as "class_domain",
3323 * with inner dimensions removed. We do this because we want to
3324 * avoid computing the complement of the class domains (i.e., the difference
3325 * between the universe and domains->done).
3326 */
3329{
3330 struct isl_codegen_domains domains;
3331 isl_ctx *ctx;
3332 isl_set *domain;
3334 isl_set *classes;
3335 isl_space *space;
3336 int n_param;
3338 isl_bool empty;
3339
3340 if (!executed)
3341 return NULL;
3342
 /* NOTE(review): several lines are elided in this rendering (e.g.
  * 3343, 3346-3351, 3360), including fetching ctx, the schedule
  * domain, the separation-class map and the option initialization. */
3344 domains.list = isl_basic_set_list_alloc(ctx, 0);
3345
3348
3350
 /* The separation classes are parameter-free point values; project
  * out the parameters before enumerating them. */
3352 classes = isl_map_range(isl_map_copy(domains.sep_class));
3353 n_param = isl_set_dim(classes, isl_dim_param);
3354 if (n_param < 0)
3355 classes = isl_set_free(classes);
3356 classes = isl_set_project_out(classes, isl_dim_param, 0, n_param);
3357
3358 space = isl_set_get_space(domain);
3359 domains.build = build;
3361 domains.executed = executed;
3362 domains.done = isl_set_empty(space);
3363
 /* Handle each separation class separately. */
3364 if (isl_set_foreach_point(classes, &compute_class_domains, &domains) < 0)
3365 domains.list = isl_basic_set_list_free(domains.list);
3366 isl_set_free(classes);
3367
 /* Then handle the remainder: the universe if no classes were seen,
  * otherwise the schedule domain with inner dimensions removed
  * (branch bodies on elided lines 3371, 3373-3374, 3376-3377). */
3368 empty = isl_set_is_empty(domains.done);
3369 if (empty < 0) {
3370 domains.list = isl_basic_set_list_free(domains.list);
3372 } else if (empty) {
3375 } else {
3377 }
3378 if (compute_partial_domains(&domains, domain) < 0)
3379 domains.list = isl_basic_set_list_free(domains.list);
3380
 /* Clean up the intermediate objects (loop header on line 3384 elided). */
3382 isl_set_free(domains.done);
3383 isl_map_free(domains.sep_class);
3385 isl_set_free(domains.option[type]);
3386
3387 return domains.list;
3388}
3389
3390/* Generate code for a single component, after shifting (if any)
3391 * has been applied, in case the schedule was specified as a union map.
3392 *
3393 * We first split up the domain at the current depth into disjoint
3394 * basic sets based on the user-specified options.
3395 * Then we generated code for each of them and concatenate the results.
3396 */
3399{
3400 isl_basic_set_list *domain_list;
3401 isl_ast_graft_list *list = NULL;
3402
 /* Split the domain per user options, then generate and concatenate
  * an AST per basic set (the generation call on elided lines 3404,
  * 3407-3408 presumably also consumes "executed" and "build"). */
3403 domain_list = compute_domains(executed, build);
3405
3406 isl_basic_set_list_free(domain_list);
3409
3410 return list;
3411}
3412
3413/* Generate code for a single component, after shifting (if any)
3414 * has been applied, in case the schedule was specified as a schedule tree
3415 * and the separate option was specified.
3416 *
3417 * We perform separation on the domain of "executed" and then generate
3418 * an AST for each of the resulting disjoint basic sets.
3419 */
3422{
3423 isl_space *space;
3424 isl_set *domain;
3425 isl_basic_set_list *domain_list;
3426 isl_ast_graft_list *list;
3427
 /* Separate the schedule domain of "executed" and generate an AST per
  * resulting disjoint basic set (the separation call on elided lines
  * 3429-3430 and the generation on 3433, 3436-3437 are not visible). */
3428 space = isl_ast_build_get_space(build, 1);
3431 domain_list = isl_basic_set_list_from_set(domain);
3432
3434
3435 isl_basic_set_list_free(domain_list);
3438
3439 return list;
3440}
3441
3442/* Internal data structure for generate_shifted_component_tree_unroll.
3443 *
3444 * "executed" and "build" are inputs to generate_shifted_component_tree_unroll.
3445 * "list" collects the constructed grafts.
3446 */
3452
3453/* Initialize data->list to a list of "n" elements.
3454 */
3455static int init_unroll_tree(int n, void *user)
3456{
3457 struct isl_ast_unroll_tree_data *data = user;
3458 isl_ctx *ctx;
3459
3460 ctx = isl_ast_build_get_ctx(data->build);
3461 data->list = isl_ast_graft_list_alloc(ctx, n);
3462
3463 return 0;
3464}
3465
3466/* Given an iteration of an unrolled domain represented by "bset",
3467 * generate the corresponding AST and add the result to data->list.
3468 */
3470{
3471 struct isl_ast_unroll_tree_data *data = user;
3472
 /* Copies of "executed" and "build" are taken because the same data
  * structure is reused for every unrolled iteration. */
3473 data->list = add_node(data->list, isl_union_map_copy(data->executed),
3474 bset, isl_ast_build_copy(data->build));
3475
3476 return 0;
3477}
3478
3479/* Generate code for a single component, after shifting (if any)
3480 * has been applied, in case the schedule was specified as a schedule tree
3481 * and the unroll option was specified.
3482 *
3483 * We call foreach_iteration to iterate over the individual values and
3484 * construct and collect the corresponding grafts in do_unroll_tree_iteration.
3485 */
3489{
3490 struct isl_ast_unroll_tree_data data = { executed, build, NULL };
3491
 /* Iterate over the individual values of the current dimension,
  * collecting one graft per iteration (the foreach_iteration call
  * header on elided line 3492 and the cleanup of "executed"/"build"
  * on lines 3496-3497 are not visible in this rendering). */
3493 &do_unroll_tree_iteration, &data) < 0)
3494 data.list = isl_ast_graft_list_free(data.list);
3495
3498
3499 return data.list;
3500}
3501
3502/* Does "domain" involve a disjunction that is purely based on
3503 * constraints involving only outer dimensions?
3504 *
3505 * In particular, is there a disjunction such that the constraints
3506 * involving the current and later dimensions are the same over
3507 * all the disjuncts?
3508 */
3511{
3513 isl_set *shared, *inner;
3515 isl_size depth;
3516 isl_size n;
3517 isl_size dim;
3518
 /* A single basic set cannot contain a disjunction (the computation
  * of "n" on elided line 3519 counts the basic sets of "domain"). */
3520 if (n < 0)
3521 return isl_bool_error;
3522 if (n <= 1)
3523 return isl_bool_false;
3526 if (dim < 0 || depth < 0)
3527 return isl_bool_error;
3528
 /* Compare the constraints on the current and later dimensions with
  * those shared by all disjuncts: eliminate the inner dimensions from
  * a copy and test plain equality against the shared hull (the hull
  * computation on elided lines 3530, 3532 feeds "shared"). */
3529 inner = isl_set_copy(domain);
3531 isl_dim_set, depth, dim - depth);
3533 shared = isl_set_from_basic_set(hull);
3534 equal = isl_set_plain_is_equal(inner, shared);
3535 isl_set_free(inner);
3536 isl_set_free(shared);
3537
3538 return equal;
3539}
3540
3541/* Generate code for a single component, after shifting (if any)
3542 * has been applied, in case the schedule was specified as a schedule tree.
3543 * In particular, handle the base case where there is either no isolated
3544 * set or we are within the isolated set (in which case "isolated" is set)
3545 * or the iterations that precede or follow the isolated set.
3546 *
3547 * The schedule domain is broken up or combined into basic sets
3548 * according to the AST generation option specified in the current
3549 * schedule node, which may be either atomic, separate, unroll or
3550 * unspecified. If the option is unspecified, then we currently simply
3551 * split the schedule domain into disjoint basic sets.
3552 *
3553 * In case the separate option is specified, the AST generation is
3554 * handled by generate_shifted_component_tree_separate.
3555 * In the other cases, we need the global schedule domain.
3556 * In the unroll case, the AST generation is then handled by
3557 * generate_shifted_component_tree_unroll which needs the actual
3558 * schedule domain (with divs that may refer to the current dimension)
3559 * so that stride detection can be performed.
3560 * In the atomic or unspecified case, inner dimensions and divs involving
3561 * the current dimensions should be eliminated.
3562 * The result is then either combined into a single basic set or
3563 * split up into disjoint basic sets.
3564 * Finally an AST is generated for each basic set and the results are
3565 * concatenated.
3566 *
3567 * If the schedule domain involves a disjunction that is purely based on
3568 * constraints involving only outer dimension, then it is treated as
3569 * if atomic was specified. This ensures that only a single loop
3570 * is generated instead of a sequence of identical loops with
3571 * different guards.
3572 */
/* NOTE(review): the first signature lines are elided in this listing. */
3575 int isolated)
3576{
3577 isl_bool outer_disjunction;
3578 isl_union_set *schedule_domain;
3579 isl_set *domain;
3580 isl_basic_set_list *domain_list;
3581 isl_ast_graft_list *list;
3583
3585 if (type < 0)
3586 goto error;
3587
 /* The "separate" and "unroll" options are dispatched to dedicated
  * helpers; the corresponding calls are elided in this listing.
  */
3590 build);
3591
3593 domain = isl_set_from_union_set(schedule_domain);
3594
3597 build);
3598
3601
 /* A purely outer disjunction is treated as "atomic" so that a single
  * loop is generated instead of identical loops with different guards.
  */
3602 outer_disjunction = has_pure_outer_disjunction(domain, build);
3603 if (outer_disjunction < 0)
3605
3606 if (outer_disjunction || type == isl_ast_loop_atomic) {
3609 domain_list = isl_basic_set_list_from_basic_set(hull);
3610 } else {
3612 domain_list = isl_basic_set_list_from_set(domain);
3613 }
3614
3616
3617 isl_basic_set_list_free(domain_list);
3620
3621 return list;
3622error:
3625 return NULL;
3626}
3627
3628/* Extract out the disjunction imposed by "domain" on the outer
3629 * schedule dimensions.
3630 *
3631 * In particular, remove all inner dimensions from "domain" (including
3632 * the current dimension) and then remove the constraints that are shared
3633 * by all disjuncts in the result.
3634 */
3655
3656/* Add "guard" to the grafts in "list".
3657 * "build" is the outer AST build, while "sub_build" includes "guard"
3658 * in its generated domain.
3659 *
3660 * First combine the grafts into a single graft and then add the guard.
3661 * If the list is empty, or if some error occurred, then simply return
3662 * the list.
3663 */
3664static __isl_give isl_ast_graft_list *list_add_guard(
3665 __isl_take isl_ast_graft_list *list, __isl_keep isl_set *guard,
 /* NOTE(review): the remaining parameter line ("build"/"sub_build")
  * is elided in this listing.
  */
3667{
3668 isl_ast_graft *graft;
3669 isl_size n;
3670
 /* Fuse first; the guard is only attached when fusing produced
  * a single graft.
  */
3671 list = isl_ast_graft_list_fuse(list, sub_build);
3672
3673 n = isl_ast_graft_list_n_ast_graft(list);
3674 if (n < 0)
3675 return isl_ast_graft_list_free(list);
3676 if (n != 1)
3677 return list;
3678
3679 graft = isl_ast_graft_list_get_ast_graft(list, 0);
3680 graft = isl_ast_graft_add_guard(graft, isl_set_copy(guard), build);
3681 list = isl_ast_graft_list_set_ast_graft(list, 0, graft);
3682
3683 return list;
3684}
3685
3686/* Generate code for a single component, after shifting (if any)
3687 * has been applied, in case the schedule was specified as a schedule tree.
3688 * In particular, do so for the specified subset of the schedule domain.
3689 *
3690 * If we are outside of the isolated part, then "domain" may include
3691 * a disjunction. Explicitly generate this disjunction at this point
3692 * instead of relying on the disjunction getting hoisted back up
3693 * to this level.
3694 */
/* NOTE(review): the first signature lines are elided in this listing. */
3697 __isl_keep isl_ast_build *build, int isolated)
3698{
3699 isl_union_set *uset;
3700 isl_ast_graft_list *list;
3701 isl_ast_build *sub_build;
3702 int empty;
3703
 /* Nothing to generate for an empty part. */
3708 if (empty < 0)
3709 goto error;
3710 if (empty) {
3711 isl_ctx *ctx;
3715 return isl_ast_graft_list_alloc(ctx, 0);
3716 }
3717
 /* Outside the isolated part, generate the disjunction explicitly in
  * a sub-build and re-attach it as a guard afterwards.
  */
3718 sub_build = isl_ast_build_copy(build);
3719 if (!isolated) {
3721 sub_build = isl_ast_build_restrict_generated(sub_build,
3723 }
3725 isl_ast_build_copy(sub_build), isolated);
3726 if (!isolated)
3727 list = list_add_guard(list, domain, build, sub_build);
3728 isl_ast_build_free(sub_build);
3730 return list;
3731error:
3734 return NULL;
3735}
3736
3737/* Generate code for a single component, after shifting (if any)
3738 * has been applied, in case the schedule was specified as a schedule tree.
3739 * In particular, do so for the specified sequence of subsets
3740 * of the schedule domain, "before", "isolated", "after" and "other",
3741 * where only the "isolated" part is considered to be isolated.
3742 */
/* NOTE(review): the first signature lines are elided in this listing. */
3745 __isl_take isl_set *isolated, __isl_take isl_set *after,
3747{
3748 isl_ast_graft_list *list, *res;
3749
 /* Generate the four parts in schedule order (before, isolated,
  * after, other) and concatenate the resulting graft lists; only
  * the isolated part is generated with "isolated" set (the elided
  * calls pass 1 resp. 0 as the last argument).
  */
3752 build, 1);
3753 res = isl_ast_graft_list_concat(res, list);
3755 res = isl_ast_graft_list_concat(res, list);
3757 res = isl_ast_graft_list_concat(res, list);
3758
3761
3762 return res;
3763}
3764
3765/* Does "set" intersect "first", but not "second"?
3766 */
/* NOTE(review): the first signature line is elided in this listing. */
3768 __isl_keep isl_set *first, __isl_keep isl_set *second)
3769{
3770 isl_bool disjoint;
3771
 /* "set" must intersect "first" ... */
3772 disjoint = isl_set_is_disjoint(set, first);
3773 if (disjoint < 0)
3774 return isl_bool_error;
3775 if (disjoint)
3776 return isl_bool_false;
3777
 /* ... and must not intersect "second". */
3778 return isl_set_is_disjoint(set, second);
3779}
3780
3781/* Generate code for a single component, after shifting (if any)
3782 * has been applied, in case the schedule was specified as a schedule tree.
3783 * In particular, do so in case of isolation where there is
3784 * only an "isolated" part and an "after" part.
3785 * "dead1" and "dead2" are freed by this function in order to simplify
3786 * the caller.
3787 *
3788 * The "before" and "other" parts are set to empty sets.
3789 */
/* NOTE(review): the first signature lines are elided in this listing. */
3793 __isl_take isl_set *dead1, __isl_take isl_set *dead2)
3794{
3795 isl_set *empty;
3796
 /* The "before" and "other" parts are empty; forward to
  * generate_shifted_component_parts (call line elided here).
  */
3797 empty = isl_set_empty(isl_set_get_space(after));
3798 isl_set_free(dead1);
3799 isl_set_free(dead2);
3801 isolated, after, empty, build);
3802}
3803
3804/* Generate code for a single component, after shifting (if any)
3805 * has been applied, in case the schedule was specified as a schedule tree.
3806 *
3807 * We first check if the user has specified an isolated schedule domain
3808 * and that we are not already outside of this isolated schedule domain.
3809 * If so, we break up the schedule domain into iterations that
3810 * precede the isolated domain, the isolated domain itself,
3811 * the iterations that follow the isolated domain and
3812 * the remaining iterations (those that are incomparable
3813 * to the isolated domain).
3814 * We generate an AST for each piece and concatenate the results.
3815 *
3816 * If the isolated domain is not convex, then it is replaced
3817 * by a convex superset to ensure that the sets of preceding and
3818 * following iterations are properly defined and, in particular,
3819 * that there are no intermediate iterations that do not belong
3820 * to the isolated domain.
3821 *
3822 * In the special case where at least one element of the schedule
3823 * domain that does not belong to the isolated domain needs
3824 * to be scheduled after this isolated domain, but none of those
3825 * elements need to be scheduled before, break up the schedule domain
3826 * in only two parts, the isolated domain, and a part that will be
3827 * scheduled after the isolated domain.
3828 *
3829 * If no isolated set has been specified, then we generate an
3830 * AST for the entire inverse schedule.
3831 */
3834{
3835 int i;
3836 isl_size depth;
3837 int empty, has_isolate;
3838 isl_space *space;
3839 isl_union_set *schedule_domain;
3840 isl_set *domain;
3842 isl_set *isolated, *before, *after, *test;
3843 isl_map *gt, *lt;
3844 isl_bool pure;
3845
 /* Without an (applicable) isolated set, generate the whole
  * inverse schedule at once (call elided in this listing).
  */
3847 has_isolate = isl_ast_build_has_isolated(build);
3848 if (has_isolate < 0)
3850 else if (!has_isolate)
3852
3854 domain = isl_set_from_union_set(schedule_domain);
3855
3857 isolated = isl_set_intersect(isolated, isl_set_copy(domain));
3859 empty = isl_set_is_empty(test);
3861 if (empty < 0)
3862 goto error;
3863 if (empty) {
3864 isl_set_free(isolated);
3867 }
3869 if (depth < 0)
3870 goto error;
3871
 /* Replace a non-convex isolated set by a convex superset so that
  * "before" and "after" are well defined.
  */
3872 isolated = isl_ast_build_eliminate(build, isolated);
3874 isolated = isl_set_from_basic_set(hull);
3875
 /* Build lexicographic order maps on the first depth+1 dimensions:
  * equal on the outer dimensions, ">" resp. "<" at "depth".
  */
3876 space = isl_space_map_from_set(isl_set_get_space(isolated));
3877 gt = isl_map_universe(space);
3878 for (i = 0; i < depth; ++i)
3879 gt = isl_map_equate(gt, isl_dim_in, i, isl_dim_out, i);
3880 gt = isl_map_order_gt(gt, isl_dim_in, depth, isl_dim_out, depth);
3881 lt = isl_map_reverse(isl_map_copy(gt));
3882 before = isl_set_apply(isl_set_copy(isolated), gt);
3883 after = isl_set_apply(isl_set_copy(isolated), lt);
3884
 /* Special case: elements only after the isolated part, none before. */
3886 pure = only_intersects_first(domain, after, before);
3887 if (pure < 0)
3889 else if (pure)
3891 domain, build, before, after);
3894 after = isl_set_subtract(after, isl_set_copy(isolated));
3895 after = isl_set_subtract(after, isl_set_copy(before));
3897
3899 after, domain, build);
3900error:
3902 isl_set_free(isolated);
3905 return NULL;
3906}
3907
3908/* Generate code for a single component, after shifting (if any)
3909 * has been applied.
3910 *
3911 * Call generate_shifted_component_tree or generate_shifted_component_flat
3912 * depending on whether the schedule was specified as a schedule tree.
3913 */
3922
3927
3928/* Given an array "domain" of isl_set_map_pairs and an array "order"
3929 * of indices into the "domain" array,
3930 * return the union of the "map" fields of the elements
3931 * indexed by the first "n" elements of "order".
3932 */
/* NOTE(review): the first signature line is elided in this listing. */
3934 struct isl_set_map_pair *domain, int *order, int n)
3935{
3936 int i;
3937 isl_map *map;
3938 isl_union_map *executed;
3939
 /* Seed the union with the first selected map, then add the rest;
  * assumes n >= 1 (order[0] is read unconditionally).
  */
3940 map = isl_map_copy(domain[order[0]].map);
3941 executed = isl_union_map_from_map(map);
3942 for (i = 1; i < n; ++i) {
3943 map = isl_map_copy(domain[order[i]].map);
3944 executed = isl_union_map_add_map(executed, map);
3945 }
3946
3947 return executed;
3948}
3949
3950/* Generate code for a single component, after shifting (if any)
3951 * has been applied.
3952 *
3953 * The component inverse schedule is specified as the "map" fields
3954 * of the elements of "domain" indexed by the first "n" elements of "order".
3955 */
/* NOTE(review): the leading signature line and the "build" parameter
 * line are elided in this listing.
 */
3957 struct isl_set_map_pair *domain, int *order, int n,
3959{
3960 isl_union_map *executed;
3961
 /* Union the selected inverse schedules and hand them off. */
3962 executed = construct_component_executed(domain, order, n);
3963 return generate_shifted_component(executed, build);
3964}
3965
3966/* Does set dimension "pos" of "set" have an obviously fixed value?
3967 */
/* NOTE(review): the signature line is elided in this listing. */
3969{
3970 int fixed;
3971 isl_val *v;
3972
 /* The elided call presumably extracts the plain fixed value of the
  * dimension; a NaN result encodes "no fixed value".
  */
3974 if (!v)
3975 return -1;
3976 fixed = !isl_val_is_nan(v);
3977 isl_val_free(v);
3978
3979 return fixed;
3980}
3981
3982/* Given an array "domain" of isl_set_map_pairs and an array "order"
3983 * of indices into the "domain" array,
3984 * do all (except for at most one) of the "set" field of the elements
3985 * indexed by the first "n" elements of "order" have a fixed value
3986 * at position "depth"?
3987 */
/* NOTE(review): the first signature line is elided in this listing. */
3989 int *order, int n, int depth)
3990{
3991 int i;
 /* Index of the only non-fixed element seen so far, or -1. */
3992 int non_fixed = -1;
3993
3994 for (i = 0; i < n; ++i) {
3995 int f;
3996
3997 f = dim_is_fixed(domain[order[i]].set, depth);
3998 if (f < 0)
3999 return -1;
4000 if (f)
4001 continue;
 /* Second non-fixed element found: condition fails. */
4002 if (non_fixed >= 0)
4003 return 0;
4004 non_fixed = i;
4005 }
4006
4007 return 1;
4008}
4009
4010/* Given an array "domain" of isl_set_map_pairs and an array "order"
4011 * of indices into the "domain" array,
4012 * eliminate the inner dimensions from the "set" field of the elements
4013 * indexed by the first "n" elements of "order", provided the current
4014 * dimension does not have a fixed value.
4015 *
4016 * Return the index of the first element in "order" with a corresponding
4017 * "set" field that does not have an (obviously) fixed value.
4018 */
/* NOTE(review): the first signature line is elided in this listing. */
4020 int *order, int n, int depth, __isl_keep isl_ast_build *build)
4021{
4022 int i;
4023 int base = -1;
4024
 /* Iterate backwards so that "base" ends up as the first (lowest
  * index) element without a fixed value, as documented above.
  */
4025 for (i = n - 1; i >= 0; --i) {
4026 int f;
4027 f = dim_is_fixed(domain[order[i]].set, depth);
4028 if (f < 0)
4029 return -1;
4030 if (f)
4031 continue;
4032 domain[order[i]].set = isl_ast_build_eliminate_inner(build,
4033 domain[order[i]].set);
4034 base = i;
4035 }
4036
4037 return base;
4038}
4039
4040/* Given an array "domain" of isl_set_map_pairs and an array "order"
4041 * of indices into the "domain" array,
4042 * find the element of "domain" (amongst those indexed by the first "n"
4043 * elements of "order") with the "set" field that has the smallest
4044 * value for the current iterator.
4045 *
4046 * Note that the domain with the smallest value may depend on the parameters
4047 * and/or outer loop dimension. Since the result of this function is only
4048 * used as heuristic, we only make a reasonable attempt at finding the best
4049 * domain, one that should work in case a single domain provides the smallest
4050 * value for the current dimension over all values of the parameters
4051 * and outer dimensions.
4052 *
4053 * In particular, we compute the smallest value of the first domain
4054 * and replace it by that of any later domain if that later domain
4055 * has a smallest value that is smaller for at least some value
4056 * of the parameters and outer dimensions.
4057 */
4058static int first_offset(struct isl_set_map_pair *domain, int *order, int n,
 /* NOTE(review): the "build" parameter line is elided in this listing. */
4060{
4061 int i;
4062 isl_map *min_first;
 /* Index of the current best ("smallest") domain. */
4063 int first = 0;
4064
 /* Smallest value of the current iterator over the first domain. */
4065 min_first = isl_ast_build_map_to_iterator(build,
4066 isl_set_copy(domain[order[0]].set));
4067 min_first = isl_map_lexmin(min_first);
4068
4069 for (i = 1; i < n; ++i) {
4070 isl_map *min, *test;
4071 int empty;
4072
4074 isl_set_copy(domain[order[i]].set));
4075 min = isl_map_lexmin(min);
4076 test = isl_map_copy(min);
 /* If "min" beats "min_first" for some parameter/outer values
  * (the ordering constraint construction is elided here),
  * adopt this domain as the new reference.
  */
4079 empty = isl_map_is_empty(test);
4081 if (empty >= 0 && !empty) {
4082 isl_map_free(min_first);
4083 first = i;
4084 min_first = min;
4085 } else
4086 isl_map_free(min);
4087
4088 if (empty < 0)
4089 break;
4090 }
4091
4092 isl_map_free(min_first);
4093
 /* An early break (error) leaves i < n; report failure as -1. */
4094 return i < n ? -1 : first;
4095}
4096
4097/* Construct a shifted inverse schedule based on the original inverse schedule,
4098 * the stride and the offset.
4099 *
4100 * The original inverse schedule is specified as the "map" fields
4101 * of the elements of "domain" indexed by the first "n" elements of "order".
4102 *
4103 * "stride" and "offset" are such that the difference
4104 * between the values of the current dimension of domain "i"
4105 * and the values of the current dimension for some reference domain are
4106 * equal to
4107 *
4108 * stride * integer + offset[i]
4109 *
4110 * Moreover, 0 <= offset[i] < stride.
4111 *
4112 * For each domain, we create a map
4113 *
4114 * { [..., j, ...] -> [..., j - offset[i], offset[i], ....] }
4115 *
4116 * where j refers to the current dimension and the other dimensions are
4117 * unchanged, and apply this map to the original schedule domain.
4118 *
4119 * For example, for the original schedule
4120 *
4121 * { A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
4122 *
4123 * and assuming the offset is 0 for the A domain and 1 for the B domain,
4124 * we apply the mapping
4125 *
4126 * { [j] -> [j, 0] }
4127 *
4128 * to the schedule of the "A" domain and the mapping
4129 *
4130 * { [j - 1] -> [j, 1] }
4131 *
4132 * to the schedule of the "B" domain.
4133 *
4134 *
4135 * Note that after the transformation, the differences between pairs
4136 * of values of the current dimension over all domains are multiples
4137 * of stride and that we have therefore exposed the stride.
4138 *
4139 *
4140 * To see that the mapping preserves the lexicographic order,
4141 * first note that each of the individual maps above preserves the order.
4142 * If the value of the current iterator is j1 in one domain and j2 in another,
4143 * then if j1 = j2, we know that the same map is applied to both domains
4144 * and the order is preserved.
4145 * Otherwise, let us assume, without loss of generality, that j1 < j2.
4146 * If c1 >= c2 (with c1 and c2 the corresponding offsets), then
4147 *
4148 * j1 - c1 < j2 - c2
4149 *
4150 * and the order is preserved.
4151 * If c1 < c2, then we know
4152 *
4153 * 0 <= c2 - c1 < s
4154 *
4155 * We also have
4156 *
4157 * j2 - j1 = n * s + r
4158 *
4159 * with n >= 0 and 0 <= r < s.
4160 * In other words, r = c2 - c1.
4161 * If n > 0, then
4162 *
4163 * j1 - c1 < j2 - c2
4164 *
4165 * If n = 0, then
4166 *
4167 * j1 - c1 = j2 - c2
4168 *
4169 * and so
4170 *
4171 * (j1 - c1, c1) << (j2 - c2, c2)
4172 *
4173 * with "<<" the lexicographic order, proving that the order is preserved
4174 * in all cases.
4175 */
/* NOTE(review): the leading signature line and the stride/offset/build
 * parameter lines are elided in this listing.
 */
4177 struct isl_set_map_pair *domain, int *order, int n,
4180{
4181 int i;
4182 isl_union_map *executed;
4183 isl_space *space;
4184 isl_map *map;
4185 isl_size depth;
4186 isl_constraint *c;
4187
4188 depth = isl_ast_build_get_depth(build);
4189 if (depth < 0)
4190 return NULL;
 /* Template map { [..., j, ...] -> [..., j', c, ...] }: the current
  * output dimension is freed (eliminate) and an extra dimension for
  * the offset is inserted right after it.
  */
4191 space = isl_ast_build_get_space(build, 1);
4192 executed = isl_union_map_empty(isl_space_copy(space));
4193 space = isl_space_map_from_set(space);
4195 map = isl_map_eliminate(map, isl_dim_out, depth, 1);
4196 map = isl_map_insert_dims(map, isl_dim_out, depth + 1, 1);
4197 space = isl_space_insert_dims(space, isl_dim_out, depth + 1, 1);
4198
4202
4203 for (i = 0; i < n; ++i) {
4204 isl_map *map_i;
4205 isl_val *v;
4206
 /* Per domain: fix the extra dimension to offset[i] and shift
  * the current dimension by -offset[i] (constraint lines elided).
  */
4207 v = isl_multi_val_get_val(offset, i);
4208 if (!v)
4209 break;
4210 map_i = isl_map_copy(map);
4211 map_i = isl_map_fix_val(map_i, isl_dim_out, depth + 1,
4212 isl_val_copy(v));
4213 v = isl_val_neg(v);
4216
4217 map_i = isl_map_apply_domain(isl_map_copy(domain[order[i]].map),
4218 map_i);
4219 executed = isl_union_map_add_map(executed, map_i);
4220 }
4221
4224
 /* An early break above signals an error: free the partial result. */
4225 if (i < n)
4226 executed = isl_union_map_free(executed);
4227
4228 return executed;
4229}
4230
4231/* Generate code for a single component, after exposing the stride,
4232 * given that the schedule domain is "shifted strided".
4233 *
4234 * The component inverse schedule is specified as the "map" fields
4235 * of the elements of "domain" indexed by the first "n" elements of "order".
4236 *
4237 * The schedule domain being "shifted strided" means that the differences
4238 * between the values of the current dimension of domain "i"
4239 * and the values of the current dimension for some reference domain are
4240 * equal to
4241 *
4242 * stride * integer + offset[i]
4243 *
4244 * We first look for the domain with the "smallest" value for the current
4245 * dimension and adjust the offsets such that the offset of the "smallest"
4246 * domain is equal to zero. The other offsets are reduced modulo stride.
4247 *
4248 * Based on this information, we construct a new inverse schedule in
4249 * construct_shifted_executed that exposes the stride.
4250 * Since this involves the introduction of a new schedule dimension,
4251 * the build needs to be changed accordingly.
4252 * After computing the AST, the newly introduced dimension needs
4253 * to be removed again from the list of grafts. We do this by plugging
4254 * in a mapping that represents the new schedule domain in terms of the
4255 * old schedule domain.
4256 */
4257static __isl_give isl_ast_graft_list *generate_shift_component(
4258 struct isl_set_map_pair *domain, int *order, int n,
 /* NOTE(review): the stride/offset/build parameter lines are elided
  * in this listing.
  */
4261{
4262 isl_ast_graft_list *list;
4263 int first;
4264 isl_size depth;
4265 isl_val *val;
4266 isl_multi_val *mv;
4267 isl_space *space;
4268 isl_multi_aff *ma, *zero;
4269 isl_union_map *executed;
4270
4271 depth = isl_ast_build_get_depth(build);
4272
4273 first = first_offset(domain, order, n, build);
4274 if (depth < 0 || first < 0)
4275 goto error;
4276
 /* Normalize offsets so the "smallest" domain gets offset 0;
  * the rest are reduced modulo the stride.
  */
4277 mv = isl_multi_val_copy(offset);
4278 val = isl_multi_val_get_val(offset, first);
4279 val = isl_val_neg(val);
4280 mv = isl_multi_val_add_val(mv, val);
4281 mv = isl_multi_val_mod_val(mv, isl_val_copy(stride));
4282
4283 executed = construct_shifted_executed(domain, order, n, stride, mv,
4284 build);
 /* "ma" maps the new schedule domain (with the inserted dimension)
  * back to the old one so the extra dimension can be plugged away
  * from the grafts afterwards (the preimage call is elided here).
  */
4285 space = isl_ast_build_get_space(build, 1);
4286 space = isl_space_map_from_set(space);
4287 ma = isl_multi_aff_identity(isl_space_copy(space));
4289 space = isl_space_add_dims(space, isl_dim_out, 1);
4290 zero = isl_multi_aff_zero(space);
4291 ma = isl_multi_aff_range_splice(ma, depth + 1, zero);
4292 build = isl_ast_build_insert_dim(build, depth + 1);
4293 list = generate_shifted_component(executed, build);
4294
4296
4297 isl_multi_val_free(mv);
4298
4299 return list;
4300error:
4301 isl_ast_build_free(build);
4302 return NULL;
4303}
4304
4305/* Does any node in the schedule tree rooted at the current schedule node
4306 * of "build" depend on outer schedule nodes?
4307 */
/* NOTE(review): the signature line is elided in this listing. */
4309{
4310 isl_schedule_node *node;
4311 int dependent = 0;
4312
 /* A subtree is "anchored" if it refers to outer band coordinates. */
4313 node = isl_ast_build_get_schedule_node(build);
4314 dependent = isl_schedule_node_is_subtree_anchored(node);
4316
4317 return dependent;
4318}
4319
4320/* Generate code for a single component.
4321 *
4322 * The component inverse schedule is specified as the "map" fields
4323 * of the elements of "domain" indexed by the first "n" elements of "order".
4324 *
4325 * This function may modify the "set" fields of "domain".
4326 *
4327 * Before proceeding with the actual code generation for the component,
4328 * we first check if there are any "shifted" strides, meaning that
4329 * the schedule domains of the individual domains are all strided,
4330 * but that they have different offsets, resulting in the union
4331 * of schedule domains not being strided anymore.
4332 *
4333 * The simplest example is the schedule
4334 *
4335 * { A[i] -> [2i]: 0 <= i < 10; B[i] -> [2i+1] : 0 <= i < 10 }
4336 *
4337 * Both schedule domains are strided, but their union is not.
4338 * This function detects such cases and then rewrites the schedule to
4339 *
4340 * { A[i] -> [2i, 0]: 0 <= i < 10; B[i] -> [2i, 1] : 0 <= i < 10 }
4341 *
4342 * In the new schedule, the schedule domains have the same offset (modulo
4343 * the stride), ensuring that the union of schedule domains is also strided.
4344 *
4345 *
4346 * If there is only a single domain in the component, then there is
4347 * nothing to do. Similarly, if the current schedule dimension has
4348 * a fixed value for almost all domains then there is nothing to be done.
4349 * In particular, we need at least two domains where the current schedule
4350 * dimension does not have a fixed value.
4351 * Finally, in case of a schedule map input,
4352 * if any of the options refer to the current schedule dimension,
4353 * then we bail out as well. It would be possible to reformulate the options
4354 * in terms of the new schedule domain, but that would introduce constraints
4355 * that separate the domains in the options and that is something we would
4356 * like to avoid.
4357 * In the case of a schedule tree input, we bail out if any of
4358 * the descendants of the current schedule node refer to outer
4359 * schedule nodes in any way.
4360 *
4361 *
4362 * To see if there is any shifted stride, we look at the differences
4363 * between the values of the current dimension in pairs of domains
4364 * for equal values of outer dimensions. These differences should be
4365 * of the form
4366 *
4367 * m x + r
4368 *
4369 * with "m" the stride and "r" a constant. Note that we cannot perform
4370 * this analysis on individual domains as the lower bound in each domain
4371 * may depend on parameters or outer dimensions and so the current dimension
4372 * itself may not have a fixed remainder on division by the stride.
4373 *
4374 * In particular, we compare the first domain that does not have an
4375 * obviously fixed value for the current dimension to itself and all
4376 * other domains and collect the offsets and the gcd of the strides.
4377 * If the gcd becomes one, then we failed to find shifted strides.
4378 * If the gcd is zero, then the differences were all fixed, meaning
4379 * that some domains had non-obviously fixed values for the current dimension.
4380 * If all the offsets are the same (for those domains that do not have
4381 * an obviously fixed value for the current dimension), then we do not
4382 * apply the transformation.
4383 * If none of the domains were skipped, then there is nothing to do.
4384 * If some of them were skipped, then if we apply separation, the schedule
4385 * domain should get split in pieces with a (non-shifted) stride.
4386 *
4387 * Otherwise, we apply a shift to expose the stride in
4388 * generate_shift_component.
4389 */
4390static __isl_give isl_ast_graft_list *generate_component(
4391 struct isl_set_map_pair *domain, int *order, int n,
 /* NOTE(review): the "build" parameter line is elided in this listing. */
4393{
4394 int i, d;
4395 isl_size depth;
4396 isl_ctx *ctx;
4397 isl_map *map;
4398 isl_set *deltas;
4399 isl_val *gcd = NULL;
4400 isl_multi_val *mv;
4401 int fixed, skip;
4402 int base;
4403 isl_ast_graft_list *list;
4404 int res = 0;
4405
4406 depth = isl_ast_build_get_depth(build);
4407 if (depth < 0)
4408 goto error;
4409
 /* Bail out (skip the shift analysis) for a single domain, for at
  * most one non-fixed domain, or when the schedule (tree or map)
  * refers to the current dimension -- see the comment above.
  */
4410 skip = n == 1;
4411 if (skip >= 0 && !skip)
4412 skip = at_most_one_non_fixed(domain, order, n, depth);
4413 if (skip >= 0 && !skip) {
4415 skip = has_anchored_subtree(build);
4416 else
4418 }
4419 if (skip < 0)
4420 goto error;
4421 if (skip)
4423 order, n, build);
4424
4425 base = eliminate_non_fixed(domain, order, n, depth, build);
4426 if (base < 0)
4427 goto error;
4428
4429 ctx = isl_ast_build_get_ctx(build);
4430
4431 mv = isl_multi_val_zero(isl_space_set_alloc(ctx, 0, n));
4432
 /* Compare each domain against the reference domain "base":
  * equate the outer dimensions, take deltas and extract the
  * residue class (stride "m", remainder "r") at "depth".
  */
4433 fixed = 1;
4434 for (i = 0; i < n; ++i) {
4435 isl_val *r, *m;
4436
4438 isl_set_copy(domain[order[base]].set),
4439 isl_set_copy(domain[order[i]].set));
4440 for (d = 0; d < depth; ++d)
4442 isl_dim_out, d);
4443 deltas = isl_map_deltas(map);
4444 res = isl_set_dim_residue_class_val(deltas, depth, &m, &r);
4445 isl_set_free(deltas);
4446 if (res < 0)
4447 break;
4448
 /* gcd of the strides over all pairs; gcd 1 means failure. */
4449 if (i == 0)
4450 gcd = m;
4451 else
4452 gcd = isl_val_gcd(gcd, m);
4453 if (isl_val_is_one(gcd)) {
4454 isl_val_free(r);
4455 break;
4456 }
4457 mv = isl_multi_val_set_val(mv, i, r);
4458
 /* Domains with an obviously fixed value do not count towards
  * the "all offsets equal" check below.
  */
4459 res = dim_is_fixed(domain[order[i]].set, depth);
4460 if (res < 0)
4461 break;
4462 if (res)
4463 continue;
4464
4465 if (fixed && i > base) {
4466 isl_val *a, *b;
4467 a = isl_multi_val_get_val(mv, i);
4468 b = isl_multi_val_get_val(mv, base);
4469 if (isl_val_ne(a, b))
4470 fixed = 0;
4471 isl_val_free(a);
4472 isl_val_free(b);
4473 }
4474 }
4475
 /* Early break / equal offsets / zero gcd: no shift transformation. */
4476 if (res < 0 || !gcd) {
4477 isl_ast_build_free(build);
4478 list = NULL;
4479 } else if (i < n || fixed || isl_val_is_zero(gcd)) {
4481 order, n, build);
4482 } else {
4483 list = generate_shift_component(domain, order, n, gcd, mv,
4484 build);
4485 }
4486
4488 isl_multi_val_free(mv);
4489
4490 return list;
4491error:
4492 isl_ast_build_free(build);
4493 return NULL;
4494}
4495
4496/* Store both "map" itself and its domain in the
4497 * structure pointed to by *next and advance to the next array element.
4498 */
/* NOTE(review): the signature line is elided in this listing;
 * presumably (__isl_take isl_map *map, void *user) -- confirm.
 */
4500{
4501 struct isl_set_map_pair **next = user;
4502
 /* isl_map_domain consumes "map", so store a copy of the map first. */
4503 (*next)->map = isl_map_copy(map);
4504 (*next)->set = isl_map_domain(map);
4505 (*next)++;
4506
4507 return isl_stat_ok;
4508}
4509
4512
4513/* Is any domain element of "umap" scheduled after any of
4514 * the corresponding image elements by the tree rooted at
4515 * the child of "node"?
4516 */
/* NOTE(review): the signature line is elided in this listing. */
4519{
4520 isl_schedule_node *child;
4521 isl_bool after;
4522
 /* Recurse into the (single) child of "node". */
4523 child = isl_schedule_node_get_child(node, 0);
4524 after = after_in_tree(umap, child);
4526
4527 return after;
4528}
4529
4530/* Is any domain element of "umap" scheduled after any of
4531 * the corresponding image elements by the tree rooted at
4532 * the band node "node"?
4533 *
4534 * We first check if any domain element is scheduled after any
4535 * of the corresponding image elements by the band node itself.
4536 * If not, we restrict "map" to those pairs of element that
4537 * are scheduled together by the band node and continue with
4538 * the child of the band node.
4539 * If there are no such pairs then the map passed to after_in_child
4540 * will be empty causing it to return 0.
4541 */
/* NOTE(review): the signature line and several statements are elided
 * in this listing.
 */
4544{
4546 isl_union_map *partial, *test, *gt, *universe, *umap1, *umap2;
4548 isl_space *space;
4549 isl_bool empty;
4550 isl_bool after;
4551 isl_size n;
4552
 /* A zero-dimensional band imposes no order of its own. */
4554 if (n < 0)
4555 return isl_bool_error;
4556 if (n == 0)
4557 return after_in_child(umap, node);
4558
 /* Check whether the band itself schedules any domain element after
  * a corresponding image element.
  */
4560 space = isl_multi_union_pw_aff_get_space(mupa);
4562 test = isl_union_map_copy(umap);
4569
4570 if (empty < 0 || !empty) {
4571 isl_union_map_free(partial);
4572 return isl_bool_not(empty);
4573 }
4574
 /* Otherwise restrict to pairs scheduled together by the band and
  * continue with its child.
  */
4578 umap1 = isl_union_map_copy(partial);
4579 umap1 = isl_union_map_intersect_domain(umap1, domain);
4580 umap2 = isl_union_map_intersect_domain(partial, range);
4583 after = after_in_child(test, node);
4585 return after;
4586}
4587
4588/* Is any domain element of "umap" scheduled after any of
4589 * the corresponding image elements by the tree rooted at
4590 * the context node "node"?
4591 *
4592 * The context constraints apply to the schedule domain,
4593 * so we cannot apply them directly to "umap", which contains
4594 * pairs of statement instances. Instead, we add them
4595 * to the range of the prefix schedule for both domain and
4596 * range of "umap".
4597 */
/* NOTE(review): the signature line and some statements are elided
 * in this listing.
 */
4600{
4601 isl_union_map *prefix, *universe, *umap1, *umap2;
4604 isl_bool after;
4605
 /* Apply the context constraints via the prefix schedule to both the
  * domain and the range of "umap", as described above.
  */
4606 umap = isl_union_map_copy(umap);
4612 umap1 = isl_union_map_copy(prefix);
4613 umap1 = isl_union_map_intersect_domain(umap1, domain);
4614 umap2 = isl_union_map_intersect_domain(prefix, range);
4615 umap1 = isl_union_map_intersect_range(umap1,
4617 umap1 = isl_union_map_apply_range(umap1, isl_union_map_reverse(umap2));
4618 umap = isl_union_map_intersect(umap, umap1);
4619
4620 after = after_in_child(umap, node);
4621
4622 isl_union_map_free(umap);
4623
4624 return after;
4625}
4626
4627/* Is any domain element of "umap" scheduled after any of
4628 * the corresponding image elements by the tree rooted at
4629 * the expansion node "node"?
4630 *
4631 * We apply the expansion to domain and range of "umap" and
4632 * continue with its child.
4633 */
/* NOTE(review): the signature line and the statement initializing
 * "expansion" are elided in this listing.
 */
4636{
4637 isl_union_map *expansion;
4638 isl_bool after;
4639
 /* Expand both sides of "umap" and continue with the child. */
4641 umap = isl_union_map_copy(umap);
4642 umap = isl_union_map_apply_domain(umap, isl_union_map_copy(expansion));
4643 umap = isl_union_map_apply_range(umap, expansion);
4644
4645 after = after_in_child(umap, node);
4646
4647 isl_union_map_free(umap);
4648
4649 return after;
4650}
4651
4652/* Is any domain element of "umap" scheduled after any of
4653 * the corresponding image elements by the tree rooted at
4654 * the extension node "node"?
4655 *
4656 * Since the extension node may add statement instances before or
4657 * after the pairs of statement instances in "umap", we return isl_bool_true
4658 * to ensure that these pairs are not broken up.
4659 */
4665
4666/* Is any domain element of "umap" scheduled after any of
4667 * the corresponding image elements by the tree rooted at
4668 * the filter node "node"?
4669 *
4670 * We intersect domain and range of "umap" with the filter and
4671 * continue with its child.
4672 */
/* NOTE(review): the signature line and the statements initializing
 * "filter" and intersecting the domain are elided in this listing.
 */
4675{
4676 isl_union_set *filter;
4677 isl_bool after;
4678
 /* Restrict "umap" to the filter on both sides, then recurse. */
4679 umap = isl_union_map_copy(umap);
4682 umap = isl_union_map_intersect_range(umap, filter);
4683
4684 after = after_in_child(umap, node);
4685
4686 isl_union_map_free(umap);
4687
4688 return after;
4689}
4690
4691/* Is any domain element of "umap" scheduled after any of
4692 * the corresponding image elements by the tree rooted at
4693 * the set node "node"?
4694 *
4695 * This is only the case if this condition holds in any
4696 * of the (filter) children of the set node.
4697 * In particular, if the domain and the range of "umap"
4698 * are contained in different children, then the condition
4699 * does not hold.
4700 */
/* Body of after_in_set() (see the comment block above): the condition holds
 * if it holds in any of the (filter) children of the set node.
 * NOTE(review): listing line 4707 is missing, so the assignment computing
 * "n" (presumably the number of children of "node") was dropped by the
 * extraction — confirm against upstream isl.
 */
4703{
4704	int i;
4705	isl_size n;
4706
4708	if (n < 0)
4709		return isl_bool_error;
4710	for (i = 0; i < n; ++i) {
4711		isl_schedule_node *child;
4712		isl_bool after;
4713
4714		child = isl_schedule_node_get_child(node, i);
4715		after = after_in_tree(umap, child);
4717
		/* Propagate both errors (after < 0) and positive results. */
4718		if (after < 0 || after)
4719			return after;
4720	}
4721
4722	return isl_bool_false;
4723}
4724
4725/* Return the filter of child "i" of "node".
4726 */
/* child_filter: return the filter of child "i" of "node".
 * NOTE(review): the return-type line of the signature and listing lines
 * 4734-4735 (presumably extracting the filter from "child" and freeing
 * "child") were dropped by the extraction — confirm against upstream isl.
 */
4728	__isl_keep isl_schedule_node *node, int i)
4729{
4730	isl_schedule_node *child;
4731	isl_union_set *filter;
4732
4733	child = isl_schedule_node_get_child(node, i);
4736
4737	return filter;
4738}
4739
4740/* Is any domain element of "umap" scheduled after any of
4741 * the corresponding image elements by the tree rooted at
4742 * the sequence node "node"?
4743 *
4744 * This happens in particular if any domain element is
4745 * contained in a later child than one containing a range element or
4746 * if the condition holds within a given child in the sequence.
4747 * The later part of the condition is checked by after_in_set.
4748 */
/* Body of after_in_sequence() (see the comment block above): a domain
 * element in a later child than a range element, or the condition holding
 * within one child (checked by after_in_set at the end), triggers "true".
 * NOTE(review): the signature line and listing line 4758 (presumably the
 * assignment computing "n", the number of children) were dropped by the
 * extraction — confirm against upstream isl.
 */
4751{
4752	int i, j;
4753	isl_size n;
4754	isl_union_map *umap_i;
4755	isl_bool empty;
4756	isl_bool after = isl_bool_false;
4757
4759	if (n < 0)
4760		return isl_bool_error;
	/* Restrict the domain to child "i" and the range to each earlier
	 * child "j"; a non-empty intersection means a domain element is
	 * scheduled after a range element. */
4761	for (i = 1; i < n; ++i) {
4762		isl_union_set *filter_i;
4763
4764		umap_i = isl_union_map_copy(umap);
4765		filter_i = child_filter(node, i);
4766		umap_i = isl_union_map_intersect_domain(umap_i, filter_i);
4767		empty = isl_union_map_is_empty(umap_i);
4768		if (empty < 0)
4769			goto error;
4770		if (empty) {
4771			isl_union_map_free(umap_i);
4772			continue;
4773		}
4774
4775		for (j = 0; j < i; ++j) {
4776			isl_union_set *filter_j;
4777			isl_union_map *umap_ij;
4778
4779			umap_ij = isl_union_map_copy(umap_i);
4780			filter_j = child_filter(node, j);
4781			umap_ij = isl_union_map_intersect_range(umap_ij,
4782						filter_j);
4783			empty = isl_union_map_is_empty(umap_ij);
4784			isl_union_map_free(umap_ij);
4785
4786			if (empty < 0)
4787				goto error;
4788			if (!empty)
4789				after = isl_bool_true;
4790			if (after)
4791				break;
4792		}
4793
4794		isl_union_map_free(umap_i);
4795		if (after)
4796			break;
4797	}
4798
4799	if (after < 0 || after)
4800		return after;
4801
	/* No cross-child ordering found; check within each child. */
4802	return after_in_set(umap, node);
4803error:
4804	isl_union_map_free(umap_i);
4805	return isl_bool_error;
4806}
4807
4808/* Is any domain element of "umap" scheduled after any of
4809 * the corresponding image elements by the tree rooted at "node"?
4810 *
4811 * If "umap" is empty, then clearly there is no such element.
4812 * Otherwise, consider the different types of nodes separately.
4813 */
/* Body of after_in_tree() (see the comment block above): dispatch on the
 * type of "node" after handling the trivial empty-"umap" case.
 * NOTE(review): the signature line, the declaration/assignment of "type"
 * (listing lines 4818, 4828) and all the "case isl_schedule_node_*:" labels
 * of the switch were dropped by the extraction, leaving only the return
 * statements — reconcile against upstream isl before relying on this text.
 */
4816{
4817	isl_bool empty;
4819
4820	empty = isl_union_map_is_empty(umap);
4821	if (empty < 0)
4822		return isl_bool_error;
4823	if (empty)
4824		return isl_bool_false;
4825	if (!node)
4826		return isl_bool_error;
4827
4829	switch (type) {
4831		return isl_bool_error;
4833		return isl_bool_false;
4835		return after_in_band(umap, node);
4838			"unexpected internal domain node",
4839			return isl_bool_error);
4841		return after_in_context(umap, node);
4843		return after_in_expansion(umap, node);
4845		return after_in_extension(umap, node);
4847		return after_in_filter(umap, node);
4850		return after_in_child(umap, node);
4852		return after_in_set(umap, node);
4854		return after_in_sequence(umap, node);
4855	}
4856
4857	return isl_bool_true;
4858}
4859
4860/* Is any domain element of "map1" scheduled after any domain
4861 * element of "map2" by the subtree underneath the current band node,
4862 * while at the same time being scheduled together by the current
4863 * band node, i.e., by "map1" and "map2?
4864 *
4865 * If the child of the current band node is a leaf, then
4866 * no element can be scheduled after any other element.
4867 *
4868 * Otherwise, we construct a relation between domain elements
4869 * of "map1" and domain elements of "map2" that are scheduled
4870 * together and then check if the subtree underneath the current
4871 * band node determines their relative order.
4872 */
/* Body of after_in_subtree() (see the comment block above).
 * NOTE(review): the signature lines and listing lines 4885-4886 and
 * 4889-4891, 4894 are missing (presumably the leaf test on the child node,
 * the construction of "umap"/"map" from "map1" and "map2", and the freeing
 * of "node") — reconcile against upstream isl.
 */
4875{
4876	isl_schedule_node *node;
4877	isl_map *map;
4878	isl_union_map *umap;
4879	isl_bool after;
4880
4881	node = isl_ast_build_get_schedule_node(build);
4882	if (!node)
4883		return isl_bool_error;
4884	node = isl_schedule_node_child(node, 0);
4887		return isl_bool_false;
4888	}
4892	after = after_in_tree(umap, node);
4893	isl_union_map_free(umap);
4895	return after;
4896}
4897
4898/* Internal data for any_scheduled_after.
4899 *
4900 * "build" is the build in which the AST is constructed.
4901 * "depth" is the number of loops that have already been generated
4902 * "group_coscheduled" is a local copy of options->ast_build_group_coscheduled
4903 * "domain" is an array of set-map pairs corresponding to the different
4904 * iteration domains. The set is the schedule domain, i.e., the domain
4905 * of the inverse schedule, while the map is the inverse schedule itself.
4906 */
4913
4914/* Is any element of domain "i" scheduled after any element of domain "j"
4915 * (for a common iteration of the first data->depth loops)?
4916 *
4917 * data->domain[i].set contains the domain of the inverse schedule
4918 * for domain "i", i.e., elements in the schedule domain.
4919 *
4920 * If we are inside a band of a schedule tree and there is a pair
4921 * of elements in the two domains that is schedule together by
4922 * the current band, then we check if any element of "i" may be schedule
4923 * after element of "j" by the descendants of the band node.
4924 *
4925 * If data->group_coscheduled is set, then we also return 1 if there
4926 * is any pair of elements in the two domains that are scheduled together.
4927 */
/* any_scheduled_after: Tarjan-graph edge test (see the comment block above):
 * is any element of domain "i" scheduled after any element of domain "j"
 * for a common iteration of the first data->depth loops?
 * NOTE(review): listing line 4951 is missing, so the condition opening the
 * block that calls after_in_subtree() (closed by the brace at listing line
 * 4958) was dropped — presumably a test for being inside a schedule-tree
 * band; confirm against upstream isl.
 */
4928static isl_bool any_scheduled_after(int i, int j, void *user)
4929{
4930	struct isl_any_scheduled_after_data *data = user;
4931	isl_size dim = isl_set_dim(data->domain[i].set, isl_dim_set);
4932	int pos;
4933
4934	if (dim < 0)
4935		return isl_bool_error;
4936
	/* Compare the schedule dimensions beyond the already generated
	 * loops; the first decisive dimension determines the answer. */
4937	for (pos = data->depth; pos < dim; ++pos) {
4938		int follows;
4939
4940		follows = isl_set_follows_at(data->domain[i].set,
4941					data->domain[j].set, pos);
4942
4943		if (follows < -1)
4944			return isl_bool_error;
4945		if (follows > 0)
4946			return isl_bool_true;
4947		if (follows < 0)
4948			return isl_bool_false;
4949	}
4950
4952		isl_bool after;
4953
4954		after = after_in_subtree(data->build, data->domain[i].map,
4955					data->domain[j].map);
4956		if (after < 0 || after)
4957			return after;
4958	}
4959
	/* Fully coscheduled: the answer depends on the option only. */
4960	return isl_bool_ok(data->group_coscheduled);
4961}
4962
4963/* Look for independent components at the current depth and generate code
4964 * for each component separately. The resulting lists of grafts are
4965 * merged in an attempt to combine grafts with identical guards.
4966 *
4967 * Code for two domains can be generated separately if all the elements
4968 * of one domain are scheduled before (or together with) all the elements
4969 * of the other domain. We therefore consider the graph with as nodes
4970 * the domains and an edge between two nodes if any element of the first
4971 * node is scheduled after any element of the second node.
4972 * If the ast_build_group_coscheduled is set, then we also add an edge if
4973 * there is any pair of elements in the two domains that are scheduled
4974 * together.
4975 * Code is then generated (by generate_component)
4976 * for each of the strongly connected components in this graph
4977 * in their topological order.
4978 *
4979 * Since the test is performed on the domain of the inverse schedules of
4980 * the different domains, we precompute these domains and store
4981 * them in data.domain.
4982 */
/* generate_components (see the comment block above): split the inverse
 * schedule into independent components via a Tarjan SCC computation and
 * generate and merge code for each component.
 * NOTE(review): listing lines 4984, 4987, 4989, 5013 and 5043 are missing
 * (the parameter list, the declarations/initializations of "ctx" and
 * "depth", the initialization of data.group_coscheduled, and presumably the
 * freeing of the Tarjan graph "g") — reconcile against upstream isl.
 */
4983static __isl_give isl_ast_graft_list *generate_components(
4985{
4986	int i;
4988	isl_size n = isl_union_map_n_map(executed);
4990	struct isl_any_scheduled_after_data data;
4991	struct isl_set_map_pair *next;
4992	struct isl_tarjan_graph *g = NULL;
4993	isl_ast_graft_list *list = NULL;
4994	int n_domain = 0;
4995
4996	data.domain = NULL;
4997	if (n < 0)
4998		goto error;
4999	data.domain = isl_calloc_array(ctx, struct isl_set_map_pair, n);
5000	if (!data.domain)
5001		goto error;
5002	n_domain = n;
5003
	/* Precompute the schedule domains of the inverse schedules. */
5004	next = data.domain;
5005	if (isl_union_map_foreach_map(executed, &extract_domain, &next) < 0)
5006		goto error;
5007
5008	depth = isl_ast_build_get_depth(build);
5009	if (depth < 0)
5010		goto error;
5011	data.build = build;
5012	data.depth = depth;
5014	g = isl_tarjan_graph_init(ctx, n, &any_scheduled_after, &data);
5015	if (!g)
5016		goto error;
5017
5018	list = isl_ast_graft_list_alloc(ctx, 0);
5019
	/* Walk the SCCs in topological order; each component is the run of
	 * g->order entries up to the next -1 separator. */
5020	i = 0;
5021	while (list && n) {
5022		isl_ast_graft_list *list_c;
5023		int first = i;
5024
5025		if (g->order[i] == -1)
5026			isl_die(ctx, isl_error_internal, "cannot happen",
5027				goto error);
5028		++i; --n;
5029		while (g->order[i] != -1) {
5030			++i; --n;
5031		}
5032
5033		list_c = generate_component(data.domain,
5034					g->order + first, i - first,
5035					isl_ast_build_copy(build));
5036		list = isl_ast_graft_list_merge(list, list_c, build);
5037
5038		++i;
5039	}
5040
	/* Shared cleanup for both success and error paths. */
5041	if (0)
5042error:		list = isl_ast_graft_list_free(list);
5044	for (i = 0; i < n_domain; ++i) {
5045		isl_map_free(data.domain[i].map);
5046		isl_set_free(data.domain[i].set);
5047	}
5048	free(data.domain);
5049	isl_union_map_free(executed);
5050	isl_ast_build_free(build);
5051
5052	return list;
5053}
5054
5055/* Generate code for the next level (and all inner levels).
5056 *
5057 * If "executed" is empty, i.e., no code needs to be generated,
5058 * then we return an empty list.
5059 *
5060 * If we have already generated code for all loop levels, then we pass
5061 * control to generate_inner_level.
5062 *
5063 * If "executed" lives in a single space, i.e., if code needs to be
5064 * generated for a single domain, then there can only be a single
5065 * component and we go directly to generate_shifted_component.
5066 * Otherwise, we call generate_components to detect the components
5067 * and to call generate_component on each of them separately.
5068 */
/* generate_next_level (see the comment block above): generate code for the
 * next loop level, dispatching to generate_inner_level once all levels are
 * done, to generate_shifted_component for a single space, and to
 * generate_components otherwise.
 * NOTE(review): the parameter line of the signature (listing line 5070,
 * declared further up as taking "executed" and "build") was dropped by the
 * extraction; the body itself appears complete.
 */
5069static __isl_give isl_ast_graft_list *generate_next_level(
5071{
5072	isl_size depth;
5073	isl_size dim;
5074	isl_size n;
5075
5076	if (!build || !executed)
5077		goto error;
5078
	/* Nothing to execute: return an empty graft list. */
5079	if (isl_union_map_is_empty(executed)) {
5080		isl_ctx *ctx = isl_ast_build_get_ctx(build);
5081		isl_union_map_free(executed);
5082		isl_ast_build_free(build);
5083		return isl_ast_graft_list_alloc(ctx, 0);
5084	}
5085
5086	depth = isl_ast_build_get_depth(build);
5087	dim = isl_ast_build_dim(build, isl_dim_set);
5088	if (depth < 0 || dim < 0)
5089		goto error;
5090	if (depth >= dim)
5091		return generate_inner_level(executed, build);
5092
	/* A single space cannot have several components. */
5093	n = isl_union_map_n_map(executed);
5094	if (n < 0)
5095		goto error;
5096	if (n == 1)
5097		return generate_shifted_component(executed, build);
5098
5099	return generate_components(executed, build);
5100error:
5101	isl_union_map_free(executed);
5102	isl_ast_build_free(build);
5103	return NULL;
5104}
5105
5106/* Internal data structure used by isl_ast_build_node_from_schedule_map.
5107 * internal, executed and build are the inputs to generate_code.
5108 * list collects the output.
5109 */
5117
5118/* Given an inverse schedule in terms of the external build schedule, i.e.,
5119 *
5120 * [E -> S] -> D
5121 *
5122 * with E the external build schedule and S the additional schedule "space",
5123 * reformulate the inverse schedule in terms of the internal schedule domain,
5124 * i.e., return
5125 *
5126 * [I -> S] -> D
5127 *
5128 * We first obtain a mapping
5129 *
5130 * I -> E
5131 *
5132 * take the inverse and the product with S -> S, resulting in
5133 *
5134 * [I -> S] -> [E -> S]
5135 *
5136 * Applying the map to the input produces the desired result.
5137 */
/* internal_executed (see the comment block above): reformulate the inverse
 * schedule in terms of the internal schedule domain.
 * NOTE(review): the first signature line (listing line 5138) and listing
 * line 5150 (presumably "isl_union_map_from_map(proj));", the second
 * argument of the apply_domain call) were dropped by the extraction —
 * confirm against upstream isl.
 */
5139	__isl_take isl_union_map *executed, __isl_keep isl_space *space,
5141{
5142	isl_map *id, *proj;
5143
5144	proj = isl_ast_build_get_schedule_map(build);
5145	proj = isl_map_reverse(proj);
5146	space = isl_space_map_from_set(isl_space_copy(space));
5147	id = isl_map_identity(space);
5148	proj = isl_map_product(proj, id);
5149	executed = isl_union_map_apply_domain(executed,
5151	return executed;
5152}
5153
5154/* Generate an AST that visits the elements in the range of data->executed
5155 * in the relative order specified by the corresponding domain element(s)
5156 * for those domain elements that belong to "set".
5157 * Add the result to data->list.
5158 *
5159 * The caller ensures that "set" is a universe domain.
5160 * "space" is the space of the additional part of the schedule.
5161 * It is equal to the space of "set" if build->domain is parametric.
5162 * Otherwise, it is equal to the range of the wrapped space of "set".
5163 *
5164 * If the build space is not parametric and
5165 * if isl_ast_build_node_from_schedule_map
5166 * was called from an outside user (data->internal not set), then
5167 * the (inverse) schedule refers to the external build domain and needs to
5168 * be transformed to refer to the internal build domain.
5169 *
5170 * If the build space is parametric, then we add some of the parameter
5171 * constraints to the executed relation. Adding these constraints
5172 * allows for an earlier detection of conflicts in some cases.
5173 * However, we do not want to divide the executed relation into
5174 * more disjuncts than necessary. We therefore approximate
5175 * the constraints on the parameters by a single disjunct set.
5176 *
5177 * The build is extended to include the additional part of the schedule.
5178 * If the original build space was not parametric, then the options
5179 * in data->build refer only to the additional part of the schedule
5180 * and they need to be adjusted to refer to the complete AST build
5181 * domain.
5182 *
5183 * After having adjusted inverse schedule and build, we start generating
5184 * code with the outer loop of the current code generation
5185 * in generate_next_level.
5186 *
5187 * If the original build space was not parametric, we undo the embedding
5188 * on the resulting isl_ast_node_list so that it can be used within
5189 * the outer AST build.
5190 */
/* Body of generate_code_in_space() (see the comment block above).
 * NOTE(review): the signature lines and listing lines 5201, 5208-5209 are
 * missing (presumably the isl_union_set argument of the intersect_domain
 * call and the computation of the single-disjunct parameter "domain") —
 * reconcile against upstream isl.
 */
5193{
5194	isl_union_map *executed;
5195	isl_ast_build *build;
5196	isl_ast_graft_list *list;
5197	int embed;
5198
5199	executed = isl_union_map_copy(data->executed);
5200	executed = isl_union_map_intersect_domain(executed,
5202
	/* "embed" is set when the build domain is not parametric. */
5203	embed = !isl_set_is_params(data->build->domain);
5204	if (embed && !data->internal)
5205		executed = internal_executed(executed, space, data->build);
5206	if (!embed) {
5207		isl_set *domain;
5210		executed = isl_union_map_intersect_params(executed, domain);
5211	}
5212
	/* Extend the build with the additional part of the schedule. */
5213	build = isl_ast_build_copy(data->build);
5214	build = isl_ast_build_product(build, space);
5215
5216	list = generate_next_level(executed, build);
5217
	/* Undo the embedding so the result is usable in the outer build. */
5218	list = isl_ast_graft_list_unembed(list, embed);
5219
5220	data->list = isl_ast_graft_list_concat(data->list, list);
5221
5222	return isl_stat_ok;
5223}
5224
5225/* Generate an AST that visits the elements in the range of data->executed
5226 * in the relative order specified by the corresponding domain element(s)
5227 * for those domain elements that belong to "set".
5228 * Add the result to data->list.
5229 *
5230 * The caller ensures that "set" is a universe domain.
5231 *
5232 * If the build space S is not parametric, then the space of "set"
5233 * need to be a wrapped relation with S as domain. That is, it needs
5234 * to be of the form
5235 *
5236 * [S -> T]
5237 *
5238 * Check this property and pass control to generate_code_in_space
5239 * passing along T.
5240 * If the build space is not parametric, then T is the space of "set".
5241 */
/* Body of generate_code_set() (see the comment block above): check the
 * wrapped-relation property of "set" and delegate to
 * generate_code_in_space with the additional schedule space T.
 * NOTE(review): the signature line and listing lines 5262, 5267 are missing
 * (presumably the isl_die() opening of the error report and the freeing of
 * "set" on the error path) — reconcile against upstream isl.
 */
5243{
5244	struct isl_generate_code_data *data = user;
5245	isl_space *space, *build_space;
5246	int is_domain;
5247
5248	space = isl_set_get_space(set);
5249
	/* Parametric build domain: T is the space of "set" itself. */
5250	if (isl_set_is_params(data->build->domain))
5251		return generate_code_in_space(data, set, space);
5252
5253	build_space = isl_ast_build_get_space(data->build, data->internal);
5254	space = isl_space_unwrap(space);
5255	is_domain = isl_space_is_domain(build_space, space);
5256	isl_space_free(build_space);
5257	space = isl_space_range(space);
5258
5259	if (is_domain < 0)
5260		goto error;
5261	if (!is_domain)
5263			"invalid nested schedule space", goto error);
5264
5265	return generate_code_in_space(data, set, space);
5266error:
5268	isl_space_free(space);
5269	return isl_stat_error;
5270}
5271
5272/* Generate an AST that visits the elements in the range of "executed"
5273 * in the relative order specified by the corresponding domain element(s).
5274 *
5275 * "build" is an isl_ast_build that has either been constructed by
5276 * isl_ast_build_from_context or passed to a callback set by
5277 * isl_ast_build_set_create_leaf.
5278 * In the first case, the space of the isl_ast_build is typically
5279 * a parametric space, although this is currently not enforced.
5280 * In the second case, the space is never a parametric space.
5281 * If the space S is not parametric, then the domain space(s) of "executed"
5282 * need to be wrapped relations with S as domain.
5283 *
5284 * If the domain of "executed" consists of several spaces, then an AST
5285 * is generated for each of them (in arbitrary order) and the results
5286 * are concatenated.
5287 *
5288 * If "internal" is set, then the domain "S" above refers to the internal
5289 * schedule domain representation. Otherwise, it refers to the external
5290 * representation, as returned by isl_ast_build_get_schedule_space.
5291 *
5292 * We essentially run over all the spaces in the domain of "executed"
5293 * and call generate_code_set on each of them.
5294 */
/* generate_code (see the comment block above): run over all spaces in the
 * domain of "executed" and call generate_code_set on each.
 * NOTE(review): several listing lines are missing (5296, 5303, 5309,
 * 5311-5313, 5317, 5324, 5331, 5333, 5336-5337): the parameter list, the
 * declaration and computation of "universe", the parameter alignment
 * arguments, the assignment of "ctx", and the trailing frees on both the
 * success and error paths — reconcile against upstream isl.
 */
5295static __isl_give isl_ast_graft_list *generate_code(
5297	int internal)
5298{
5299	isl_ctx *ctx;
5300	struct isl_generate_code_data data = { 0 };
5301	isl_space *space;
5302	isl_union_set *schedule_domain;
5304
5305	if (!build)
5306		goto error;
	/* Align the parameters of build and inverse schedule. */
5307	space = isl_ast_build_get_space(build, 1);
5308	space = isl_space_align_params(space,
5310	space = isl_space_align_params(space,
5314	if (!executed || !build)
5315		goto error;
5316
5318
5319	data.internal = internal;
5320	data.executed = executed;
5321	data.build = build;
5322	data.list = isl_ast_graft_list_alloc(ctx, 0);
5323
5325	schedule_domain = isl_union_map_domain(universe);
5326	if (isl_union_set_foreach_set(schedule_domain, &generate_code_set,
5327					&data) < 0)
5328		data.list = isl_ast_graft_list_free(data.list);
5329
5330	isl_union_set_free(schedule_domain);
5332
5334	return data.list;
error:
5338	return NULL;
5339}
5340
5341/* Generate an AST that visits the elements in the domain of "schedule"
5342 * in the relative order specified by the corresponding image element(s).
5343 *
5344 * "build" is an isl_ast_build that has either been constructed by
5345 * isl_ast_build_from_context or passed to a callback set by
5346 * isl_ast_build_set_create_leaf.
5347 * In the first case, the space of the isl_ast_build is typically
5348 * a parametric space, although this is currently not enforced.
5349 * In the second case, the space is never a parametric space.
5350 * If the space S is not parametric, then the range space(s) of "schedule"
5351 * need to be wrapped relations with S as domain.
5352 *
5353 * If the range of "schedule" consists of several spaces, then an AST
5354 * is generated for each of them (in arbitrary order) and the results
5355 * are concatenated.
5356 *
5357 * We first initialize the local copies of the relevant options.
5358 * We do this here rather than when the isl_ast_build is created
5359 * because the options may have changed between the construction
5360 * of the isl_ast_build and the call to isl_generate_code.
5361 *
5362 * The main computation is performed on an inverse schedule (with
5363 * the schedule domain in the domain and the elements to be executed
5364 * in the range) called "executed".
5365 */
5381
5382/* The old name for isl_ast_build_node_from_schedule_map.
5383 * It is being kept for backward compatibility, but
5384 * it will be removed in the future.
5385 */
5391
5392/* Generate an AST that visits the elements in the domain of "executed"
5393 * in the relative order specified by the leaf node "node".
5394 *
5395 * The relation "executed" maps the outer generated loop iterators
5396 * to the domain elements executed by those iterations.
5397 *
5398 * Simply pass control to generate_inner_level.
5399 * Note that the current build does not refer to any band node, so
5400 * that generate_inner_level will not try to visit the child of
5401 * the leaf node.
5402 *
5403 * If multiple statement instances reach a leaf,
5404 * then they can be executed in any order.
5405 * Group the list of grafts based on shared guards
5406 * such that identical guards are only generated once
5407 * when the list is eventually passed on to isl_ast_graft_list_fuse.
5408 */
5422
5423/* Check that the band partial schedule "partial" does not filter out
5424 * any statement instances, as specified by the range of "executed".
5425 */
/* Body of check_band_schedule_total_on_instances() (see the comment block
 * above): verify that "partial" does not filter out any statement instance
 * from the range of "executed".
 * NOTE(review): the signature lines and listing lines 5430, 5433, 5435-5437
 * and 5443 are missing (presumably the declaration of "subset", the
 * computation of "instances"/"domain", the subset test, the freeing of
 * "domain", and the isl_die() opening) — reconcile against upstream isl.
 */
5429{
5431	isl_union_set *domain, *instances;
5432
5434	partial = isl_multi_union_pw_aff_copy(partial);
5438	isl_union_set_free(instances);
5439
5440	if (subset < 0)
5441		return isl_stat_error;
5442	if (!subset)
5444		"band node is not allowed to drop statement instances",
5445		return isl_stat_error);
5446	return isl_stat_ok;
5447}
5448
5449/* Generate an AST that visits the elements in the domain of "executed"
5450 * in the relative order specified by the band node "node" and its descendants.
5451 *
5452 * The relation "executed" maps the outer generated loop iterators
5453 * to the domain elements executed by those iterations.
5454 *
5455 * If the band is empty, we continue with its descendants.
5456 * Otherwise, we extend the build and the inverse schedule with
5457 * the additional space/partial schedule and continue generating
5458 * an AST in generate_next_level.
5459 * As soon as we have extended the inverse schedule with the additional
5460 * partial schedule, we look for equalities that may exists between
5461 * the old and the new part.
5462 */
/* build_ast_from_band (see the comment block above): extend build and
 * inverse schedule with the band's partial schedule and continue in
 * generate_next_level; an empty band falls through to the child.
 * NOTE(review): many listing lines were dropped by the extraction (5464-5465
 * parameters; 5468 declaration of "extra"; 5474 computation of "n";
 * 5481, 5483, 5486-5487, 5492-5493, 5495-5497, 5499, 5501, 5503-5506,
 * 5508, 5512-5514: the partial-schedule extraction, the totality check,
 * the flat range product / detect_equalities, the parameter-count
 * computations, the isl_die() opening, the recursive call producing "list"
 * and the error-path frees) — reconcile against upstream isl before
 * relying on this text.
 */
5463static __isl_give isl_ast_graft_list *build_ast_from_band(
5466{
5467	isl_space *space;
5469	isl_union_map *extra_umap;
5470	isl_ast_graft_list *list;
5471	isl_size n1, n2;
5472	isl_size n;
5473
5475	if (!build || n < 0 || !executed)
5476		goto error;
5477
	/* An empty band contributes nothing: continue with the child. */
5478	if (n == 0)
5479		return build_ast_from_child(build, node, executed);
5480
5482	extra = isl_multi_union_pw_aff_align_params(extra,
5484	space = isl_multi_union_pw_aff_get_space(extra);
5485
5488
5489	extra_umap = isl_union_map_from_multi_union_pw_aff(extra);
5490	extra_umap = isl_union_map_reverse(extra_umap);
5491
5494
5498	if (n1 < 0 || n2 < 0)
5500	else if (n2 > n1)
5502		"band node is not allowed to introduce new parameters",
5505
5507
5509
5510	return list;
5511error:
5515	return NULL;
5516}
5517
5518/* Hoist a list of grafts (in practice containing a single graft)
5519 * from "sub_build" (which includes extra context information)
5520 * to "build".
5521 *
5522 * In particular, project out all additional parameters introduced
5523 * by the context node from the enforced constraints and the guard
5524 * of the single graft.
5525 */
/* hoist_out_of_context (see the comment block above): project the
 * parameters introduced by the context node out of the shared enforced
 * constraints and the guard, and repackage the list as a single graft.
 * NOTE(review): listing lines 5535 and 5548-5549 are missing (presumably
 * the computation of "n_param" and the extraction of the hoistable "guard"
 * from the list) — reconcile against upstream isl.
 */
5526static __isl_give isl_ast_graft_list *hoist_out_of_context(
5527	__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build,
5528	__isl_keep isl_ast_build *sub_build)
5529{
5530	isl_ast_graft *graft;
5531	isl_basic_set *enforced;
5532	isl_set *guard;
5533	isl_size n_param, extra_param;
5534
5536	extra_param = isl_ast_build_dim(sub_build, isl_dim_param);
5537	if (n_param < 0 || extra_param < 0)
5538		return isl_ast_graft_list_free(list);
5539
	/* Nothing to hoist when the context added no parameters. */
5540	if (extra_param == n_param)
5541		return list;
5542
5543	extra_param -= n_param;
5544	enforced = isl_ast_graft_list_extract_shared_enforced(list, sub_build);
5545	enforced = isl_basic_set_project_out(enforced, isl_dim_param,
5546					n_param, extra_param);
5547	enforced = isl_basic_set_remove_unknown_divs(enforced);
5550					n_param, extra_param);
5551	guard = isl_set_project_out(guard, isl_dim_param, n_param, extra_param);
5552	guard = isl_set_compute_divs(guard);
5553	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
5554					build, sub_build);
5555	list = isl_ast_graft_list_from_ast_graft(graft);
5556
5557	return list;
5558}
5559
5560/* Generate an AST that visits the elements in the domain of "executed"
5561 * in the relative order specified by the context node "node"
5562 * and its descendants.
5563 *
5564 * The relation "executed" maps the outer generated loop iterators
5565 * to the domain elements executed by those iterations.
5566 *
5567 * The context node may introduce additional parameters as well as
5568 * constraints on the outer schedule dimensions or original parameters.
5569 *
5570 * We add the extra parameters to a new build and the context
5571 * constraints to both the build and (as a single disjunct)
5572 * to the domain of "executed". Since the context constraints
5573 * are specified in terms of the input schedule, we first need
5574 * to map them to the internal schedule domain.
5575 *
5576 * After constructing the AST from the descendants of "node",
5577 * we combine the list of grafts into a single graft within
5578 * the new build, in order to be able to exploit the additional
5579 * context constraints during this combination.
5580 *
5581 * Additionally, if the current node is the outermost node in
5582 * the schedule tree (apart from the root domain node), we generate
5583 * all pending guards, again to be able to exploit the additional
5584 * context constraints. We currently do not do this for internal
5585 * context nodes since we may still want to hoist conditions
5586 * to outer AST nodes.
5587 *
5588 * If the context node introduced any new parameters, then they
5589 * are removed from the set of enforced constraints and guard
5590 * in hoist_out_of_context.
5591 */
/* build_ast_from_context (see the comment block above): add the context
 * node's extra parameters and constraints to a sub-build and to "executed",
 * build the descendants, fuse the grafts, and hoist the added parameters
 * back out.
 * NOTE(review): listing lines 5593-5594, 5596, 5604, 5606, 5608-5609,
 * 5616-5619, 5621, 5629, 5634 are missing (parameters, declaration and
 * extraction of "context", the depth computation/early error return, the
 * restriction of "executed" by the single-disjunct context, the opening of
 * the recursive build_ast_from_child call, the generate-pending-guards
 * call, and a trailing free) — reconcile against upstream isl.
 */
5592static __isl_give isl_ast_graft_list *build_ast_from_context(
5595{
5597	isl_space *space;
5598	isl_multi_aff *internal2input;
5599	isl_ast_build *sub_build;
5600	isl_ast_graft_list *list;
5601	isl_size n;
5602	isl_size depth;
5603
5605	if (depth < 0)
5607	space = isl_ast_build_get_space(build, 1);
5610	sub_build = isl_ast_build_copy(build);
5611	space = isl_set_get_space(context);
5612	sub_build = isl_ast_build_align_params(sub_build, space);
	/* The context is given in input-schedule terms; map it to the
	 * internal schedule domain first. */
5613	internal2input = isl_ast_build_get_internal2input(sub_build);
5614	context = isl_set_preimage_multi_aff(context, internal2input);
5615	sub_build = isl_ast_build_restrict_generated(sub_build,
5620
5622					node, executed);
5623	n = isl_ast_graft_list_n_ast_graft(list);
5624	if (n < 0)
5625		list = isl_ast_graft_list_free(list);
5626
5627	list = isl_ast_graft_list_fuse(list, sub_build);
	/* depth == 1: outermost node below the domain root. */
5628	if (depth == 1)
5630					sub_build);
5631	if (n >= 1)
5632		list = hoist_out_of_context(list, build, sub_build);
5633
5635	isl_ast_build_free(sub_build);
5636
5637	return list;
5638}
5639
5640/* Generate an AST that visits the elements in the domain of "executed"
5641 * in the relative order specified by the expansion node "node" and
5642 * its descendants.
5643 *
5644 * The relation "executed" maps the outer generated loop iterators
5645 * to the domain elements executed by those iterations.
5646 *
5647 * We expand the domain elements by the expansion and
5648 * continue with the descendants of the node.
5649 */
/* build_ast_from_expansion (see the comment block above): check that the
 * expansion introduces no new parameters and continue with the child.
 * NOTE(review): listing lines 5651-5652, 5657, 5659, 5661-5663, 5667 and
 * 5673-5675 are missing (parameters, the extraction of "expansion" from the
 * node, the alignment argument, the application of the expansion to
 * "executed" and the n1/n2 computations, the isl_die() opening, and the
 * error-path frees) — reconcile against upstream isl.
 */
5650static __isl_give isl_ast_graft_list *build_ast_from_expansion(
5653{
5654	isl_union_map *expansion;
5655	isl_size n1, n2;
5656
5658	expansion = isl_union_map_align_params(expansion,
5660
5664	if (n1 < 0 || n2 < 0)
5665		goto error;
5666	if (n2 > n1)
5668		"expansion node is not allowed to introduce "
5669		"new parameters", goto error);
5670
5671	return build_ast_from_child(build, node, executed);
error:
5676	return NULL;
5677}
5678
5679/* Generate an AST that visits the elements in the domain of "executed"
5680 * in the relative order specified by the extension node "node" and
5681 * its descendants.
5682 *
5683 * The relation "executed" maps the outer generated loop iterators
5684 * to the domain elements executed by those iterations.
5685 *
5686 * Extend the inverse schedule with the extension applied to current
5687 * set of generated constraints. Since the extension if formulated
5688 * in terms of the input schedule, it first needs to be transformed
5689 * to refer to the internal schedule.
5690 */
/* build_ast_from_extension (see the comment block above): apply the
 * extension (mapped to the internal schedule domain) to the current
 * generated set and add it to the inverse schedule.
 * NOTE(review): listing lines 5692-5693, 5699-5700, 5703, 5708 and 5710 are
 * missing (parameters, the computation of "set" from the generated
 * constraints, the extraction of "extension" from the node, and the union
 * of "executed" with the extension) — reconcile against upstream isl.
 */
5691static __isl_give isl_ast_graft_list *build_ast_from_extension(
5694{
5695	isl_union_set *schedule_domain;
5696	isl_union_map *extension;
5697	isl_set *set;
5698
5701	schedule_domain = isl_union_set_from_set(set);
5702
5704
	/* The extension is formulated on the input schedule: transform it
	 * to the internal schedule via build->internal2input. */
5705	extension = isl_union_map_preimage_domain_multi_aff(extension,
5706			isl_multi_aff_copy(build->internal2input));
5707	extension = isl_union_map_intersect_domain(extension, schedule_domain);
5709					extension);
5711
5712	return build_ast_from_child(build, node, executed);
5713}
5714
5715/* Generate an AST that visits the elements in the domain of "executed"
5716 * in the relative order specified by the filter node "node" and
5717 * its descendants.
5718 *
5719 * The relation "executed" maps the outer generated loop iterators
5720 * to the domain elements executed by those iterations.
5721 *
5722 * We simply intersect the iteration domain (i.e., the range of "executed")
5723 * with the filter and continue with the descendants of the node,
5724 * unless the resulting inverse schedule is empty, in which
5725 * case we return an empty list.
5726 *
5727 * If the result of the intersection is equal to the original "executed"
5728 * relation, then keep the original representation since the intersection
5729 * may have unnecessarily broken up the relation into a greater number
5730 * of disjuncts.
5731 */
/* build_ast_from_filter (see the comment block above): intersect the range
 * of "executed" with the filter and continue with the child, keeping the
 * original representation when the intersection changes nothing and
 * returning an empty list when it becomes empty.
 * NOTE(review): listing lines 5733-5734, 5744, 5748, 5750-5753, 5757, 5762,
 * 5766, 5773, 5775-5777 and 5780-5782 are missing (parameters, the copy of
 * "executed" into "orig", the filter extraction/alignment and the actual
 * range intersection, the n1/n2 computations, the isl_die() opening, the
 * emptiness test, several frees, and the error-path frees) — reconcile
 * against upstream isl.
 */
5732static __isl_give isl_ast_graft_list *build_ast_from_filter(
5735{
5736	isl_ctx *ctx;
5737	isl_union_set *filter;
5738	isl_union_map *orig;
5739	isl_ast_graft_list *list;
5740	int empty;
5741	isl_bool unchanged;
5742	isl_size n1, n2;
5743
5745	if (!build || !node || !executed)
5746		goto error;
5747
5749	filter = isl_union_set_align_params(filter,
5754	if (n1 < 0 || n2 < 0)
5755		goto error;
5756	if (n2 > n1)
5758		"filter node is not allowed to introduce "
5759		"new parameters", goto error);
5760
	/* Prefer "orig" when the filter did not restrict anything, to
	 * avoid an unnecessarily fragmented representation. */
5761	unchanged = isl_union_map_is_subset(orig, executed);
5763	if (unchanged < 0 || empty < 0)
5764		goto error;
5765	if (unchanged) {
5767		return build_ast_from_child(build, node, orig);
5768	}
5769	isl_union_map_free(orig);
5770	if (!empty)
5771		return build_ast_from_child(build, node, executed);
5772
5774	list = isl_ast_graft_list_alloc(ctx, 0);
5778	return list;
5779error:
5783	isl_union_map_free(orig);
5784	return NULL;
5785}
5786
5787/* Generate an AST that visits the elements in the domain of "executed"
5788 * in the relative order specified by the guard node "node" and
5789 * its descendants.
5790 *
5791 * The relation "executed" maps the outer generated loop iterators
5792 * to the domain elements executed by those iterations.
5793 *
5794 * Ensure that the associated guard is enforced by the outer AST
5795 * constructs by adding it to the guard of the graft.
5796 * Since we know that we will enforce the guard, we can also include it
5797 * in the generated constraints used to construct an AST for
5798 * the descendant nodes.
5799 */
/* build_ast_from_guard (see the comment block above): enforce the guard via
 * the graft and include it in the generated constraints for the
 * descendants.
 * NOTE(review): listing lines 5801-5802, 5813, 5820, 5832, 5835, 5840 and
 * 5848 are missing (parameters, the extraction of "guard" from the node,
 * the isl_die() opening, the recursive build_ast_from_child call producing
 * "list", the extraction of the hoistable guard into "hoisted", the
 * gist-guards call opening, and the freeing of "build") — reconcile against
 * upstream isl.
 */
5800static __isl_give isl_ast_graft_list *build_ast_from_guard(
5803{
5804	isl_space *space;
5805	isl_set *guard, *hoisted;
5806	isl_basic_set *enforced;
5807	isl_ast_build *sub_build;
5808	isl_ast_graft *graft;
5809	isl_ast_graft_list *list;
5810	isl_size n1, n2, n;
5811
5812	space = isl_ast_build_get_space(build, 1);
5814	n1 = isl_space_dim(space, isl_dim_param);
5815	guard = isl_set_align_params(guard, space);
5816	n2 = isl_set_dim(guard, isl_dim_param);
5817	if (n1 < 0 || n2 < 0)
5818		guard = isl_set_free(guard);
5819	else if (n2 > n1)
5821		"guard node is not allowed to introduce "
5822		"new parameters", guard = isl_set_free(guard));
	/* The guard is formulated on the input schedule: map it to the
	 * internal schedule domain and simplify it against what is
	 * already known to hold. */
5823	guard = isl_set_preimage_multi_aff(guard,
5824			isl_multi_aff_copy(build->internal2input));
5825	guard = isl_ast_build_specialize(build, guard);
5826	guard = isl_set_gist(guard, isl_set_copy(build->generated));
5827
5828	sub_build = isl_ast_build_copy(build);
5829	sub_build = isl_ast_build_restrict_generated(sub_build,
5830					isl_set_copy(guard));
5831
5833					node, executed);
5834
5836	n = isl_set_n_basic_set(hoisted);
5837	if (n < 0)
5838		list = isl_ast_graft_list_free(list);
5839	if (n > 1)
5841					isl_set_copy(hoisted));
5842	guard = isl_set_intersect(guard, hoisted);
5843	enforced = extract_shared_enforced(list, build);
5844	graft = isl_ast_graft_alloc_from_children(list, guard, enforced,
5845			build, sub_build);
5846
5847	isl_ast_build_free(sub_build);
5849	return isl_ast_graft_list_from_ast_graft(graft);
5850}
5851
5852/* Call the before_each_mark callback, if requested by the user.
5853 *
5854 * Return 0 on success and -1 on error.
5855 *
5856 * The caller is responsible for recording the current inverse schedule
5857 * in "build".
5858 */
5861{
5862 if (!build)
5863 return isl_stat_error;
5864 if (!build->before_each_mark)
5865 return isl_stat_ok;
5866 return build->before_each_mark(mark, build,
5868}
5869
5870/* Call the after_each_mark callback, if requested by the user.
5871 *
5872 * The caller is responsible for recording the current inverse schedule
5873 * in "build".
5874 */
5877{
5878 if (!graft || !build)
5879 return isl_ast_graft_free(graft);
5880 if (!build->after_each_mark)
5881 return graft;
5882 graft->node = build->after_each_mark(graft->node, build,
5884 if (!graft->node)
5885 return isl_ast_graft_free(graft);
5886 return graft;
5887}
5888
5889
5890/* Generate an AST that visits the elements in the domain of "executed"
5891 * in the relative order specified by the mark node "node" and
5892 * its descendants.
5893 *
5894 * The relation "executed" maps the outer generated loop iterators
5895 * to the domain elements executed by those iterations.
5896
5897 * Since we may be calling before_each_mark and after_each_mark
5898 * callbacks, we record the current inverse schedule in the build.
5899 *
5900 * We generate an AST for the child of the mark node, combine
5901 * the graft list into a single graft and then insert the mark
5902 * in the AST of that single graft.
5903 */
5904static __isl_give isl_ast_graft_list *build_ast_from_mark(
5907{
5908 isl_id *mark;
5909 isl_ast_graft *graft;
5910 isl_ast_graft_list *list;
5911 isl_size n;
5912
5914
5915 mark = isl_schedule_node_mark_get_id(node);
5916 if (before_each_mark(mark, build) < 0)
5917 node = isl_schedule_node_free(node);
5918
5921 n = isl_ast_graft_list_n_ast_graft(list);
5922 if (n < 0)
5923 list = isl_ast_graft_list_free(list);
5924 if (n == 0) {
5925 isl_id_free(mark);
5926 } else {
5927 graft = isl_ast_graft_list_get_ast_graft(list, 0);
5928 graft = isl_ast_graft_insert_mark(graft, mark);
5929 graft = after_each_mark(graft, build);
5930 list = isl_ast_graft_list_set_ast_graft(list, 0, graft);
5931 }
5933
5934 return list;
5935}
5936
5937static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
5940
5941/* Generate an AST that visits the elements in the domain of "executed"
5942 * in the relative order specified by the sequence (or set) node "node" and
5943 * its descendants.
5944 *
5945 * The relation "executed" maps the outer generated loop iterators
5946 * to the domain elements executed by those iterations.
5947 *
5948 * We simply generate an AST for each of the children and concatenate
5949 * the results.
5950 */
5951static __isl_give isl_ast_graft_list *build_ast_from_sequence(
5954{
5955 int i;
5956 isl_size n;
5957 isl_ctx *ctx;
5958 isl_ast_graft_list *list;
5959
5961 list = isl_ast_graft_list_alloc(ctx, 0);
5962
5964 if (n < 0)
5965 list = isl_ast_graft_list_free(list);
5966 for (i = 0; i < n; ++i) {
5967 isl_schedule_node *child;
5968 isl_ast_graft_list *list_i;
5969
5970 child = isl_schedule_node_get_child(node, i);
5973 list = isl_ast_graft_list_concat(list, list_i);
5974 }
5978
5979 return list;
5980}
5981
5982/* Generate an AST that visits the elements in the domain of "executed"
5983 * in the relative order specified by the node "node" and its descendants.
5984 *
5985 * The relation "executed" maps the outer generated loop iterators
5986 * to the domain elements executed by those iterations.
5987 *
5988 * The node types are handled in separate functions.
5989 * Set nodes are currently treated in the same way as sequence nodes.
5990 * The children of a set node may be executed in any order,
5991 * including the order of the children.
5992 */
5993static __isl_give isl_ast_graft_list *build_ast_from_schedule_node(
5996{
5998
6000
6001 switch (type) {
6003 goto error;
6005 return build_ast_from_leaf(build, node, executed);
6007 return build_ast_from_band(build, node, executed);
6009 return build_ast_from_context(build, node, executed);
6012 "unexpected internal domain node", goto error);
6018 return build_ast_from_filter(build, node, executed);
6020 return build_ast_from_guard(build, node, executed);
6022 return build_ast_from_mark(build, node, executed);
6025 return build_ast_from_sequence(build, node, executed);
6026 }
6027
6029 "unhandled type", goto error);
6030error:
6034
6035 return NULL;
6036}
6037
6038/* Generate an AST that visits the elements in the domain of "executed"
6039 * in the relative order specified by the (single) child of "node" and
6040 * its descendants.
6041 *
6042 * The relation "executed" maps the outer generated loop iterators
6043 * to the domain elements executed by those iterations.
6044 *
6045 * This function is never called on a leaf, set or sequence node,
6046 * so the node always has exactly one child.
6047 */
6055
6056/* Generate an AST that visits the elements in the domain of the domain
6057 * node "node" in the relative order specified by its descendants.
6058 *
6059 * An initial inverse schedule is created that maps a zero-dimensional
6060 * schedule space to the node domain.
6061 * The input "build" is assumed to have a parametric domain and
6062 * is replaced by the same zero-dimensional schedule space.
6063 *
6064 * We also add some of the parameter constraints in the build domain
6065 * to the executed relation. Adding these constraints
6066 * allows for an earlier detection of conflicts in some cases.
6067 * However, we do not want to divide the executed relation into
6068 * more disjuncts than necessary. We therefore approximate
6069 * the constraints on the parameters by a single disjunct set.
6070 */
6073{
6074 isl_ctx *ctx;
6075 isl_union_set *domain, *schedule_domain;
6077 isl_space *space;
6078 isl_set *set;
6079 isl_ast_graft_list *list;
6080 isl_ast_node *ast;
6081 int is_params;
6082
6083 if (!build)
6084 goto error;
6085
6087 space = isl_ast_build_get_space(build, 1);
6088 is_params = isl_space_is_params(space);
6089 isl_space_free(space);
6090 if (is_params < 0)
6091 goto error;
6092 if (!is_params)
6094 "expecting parametric initial context", goto error);
6095
6098
6100 space = isl_space_set_from_params(space);
6102
6105 schedule_domain = isl_union_set_from_set(set);
6106
6111
6112 return ast;
6113error:
6116 return NULL;
6117}
6118
6119/* Generate an AST that visits the elements in the domain of "schedule"
6120 * in the relative order specified by the schedule tree.
6121 *
6122 * "build" is an isl_ast_build that has been created using
6123 * isl_ast_build_alloc or isl_ast_build_from_context based
6124 * on a parametric set.
6125 *
6126 * The construction starts at the root node of the schedule,
6127 * which is assumed to be a domain node.
6128 */
6131{
6132 isl_ctx *ctx;
6133 isl_schedule_node *node;
6134
6135 if (!build || !schedule)
6136 goto error;
6137
6139
6141 if (!node)
6142 goto error;
6144
6148 "expecting root domain node",
6150 return build_ast_from_domain(build, node);
6151error:
6153 return NULL;
6154}
__isl_give isl_aff * isl_aff_add_constant_si(__isl_take isl_aff *aff, int v)
Definition isl_aff.c:1099
__isl_give isl_pw_multi_aff * isl_pw_multi_aff_set_pw_aff(__isl_take isl_pw_multi_aff *pma, unsigned pos, __isl_take isl_pw_aff *pa)
Definition isl_aff.c:6620
__isl_null isl_aff * isl_aff_free(__isl_take isl_aff *aff)
Definition isl_aff.c:449
int isl_pw_aff_plain_cmp(__isl_keep isl_pw_aff *pa1, __isl_keep isl_pw_aff *pa2)
Definition isl_aff.c:7694
__isl_export __isl_give isl_union_set * isl_multi_union_pw_aff_domain(__isl_take isl_multi_union_pw_aff *mupa)
Definition isl_aff.c:9393
__isl_export __isl_give isl_aff * isl_aff_neg(__isl_take isl_aff *aff)
Definition isl_aff.c:1451
__isl_overload __isl_give isl_aff * isl_aff_scale_val(__isl_take isl_aff *aff, __isl_take isl_val *v)
Definition isl_aff.c:2075
__isl_give isl_val * isl_aff_get_denominator_val(__isl_keep isl_aff *aff)
Definition isl_aff.c:819
__isl_null isl_pw_aff * isl_pw_aff_free(__isl_take isl_pw_aff *pwaff)
__isl_overload __isl_give isl_union_map * isl_union_map_from_multi_union_pw_aff(__isl_take isl_multi_union_pw_aff *mupa)
Definition isl_aff.c:9157
__isl_overload __isl_give isl_aff * isl_aff_scale_down_val(__isl_take isl_aff *aff, __isl_take isl_val *v)
Definition isl_aff.c:2142
__isl_give isl_aff * isl_aff_copy(__isl_keep isl_aff *aff)
Definition isl_aff.c:145
__isl_export __isl_give isl_set * isl_pw_aff_gt_set(__isl_take isl_pw_aff *pwaff1, __isl_take isl_pw_aff *pwaff2)
Definition isl_aff.c:3175
__isl_export isl_stat isl_pw_multi_aff_foreach_piece(__isl_keep isl_pw_multi_aff *pma, isl_stat(*fn)(__isl_take isl_set *set, __isl_take isl_multi_aff *maff, void *user), void *user)
__isl_export __isl_give isl_aff * isl_aff_floor(__isl_take isl_aff *aff)
Definition isl_aff.c:1729
__isl_give isl_aff * isl_aff_add_coefficient_si(__isl_take isl_aff *aff, enum isl_dim_type type, int pos, int v)
Definition isl_aff.c:1426
__isl_give isl_aff * isl_aff_zero_on_domain(__isl_take isl_local_space *ls)
Definition isl_aff.c:235
__isl_constructor __isl_give isl_pw_aff * isl_pw_aff_from_aff(__isl_take isl_aff *aff)
isl_size isl_aff_dim(__isl_keep isl_aff *aff, enum isl_dim_type type)
Definition isl_aff.c:509
__isl_export __isl_give isl_aff * isl_aff_sub(__isl_take isl_aff *aff1, __isl_take isl_aff *aff2)
Definition isl_aff.c:2029
__isl_export __isl_give isl_val * isl_aff_get_constant_val(__isl_keep isl_aff *aff)
Definition isl_aff.c:834
isl_bool isl_pw_aff_every_piece(__isl_keep isl_pw_aff *pa, isl_bool(*test)(__isl_keep isl_set *set, __isl_keep isl_aff *aff, void *user), void *user)
__isl_give isl_pw_multi_aff * isl_pw_multi_aff_from_map(__isl_take isl_map *map)
Definition isl_aff.c:5617
__isl_export __isl_give isl_pw_aff * isl_pw_aff_ceil(__isl_take isl_pw_aff *pwaff)
Definition isl_aff.c:3406
__isl_export __isl_give isl_aff * isl_aff_add(__isl_take isl_aff *aff1, __isl_take isl_aff *aff2)
Definition isl_aff.c:1976
__isl_null isl_pw_multi_aff * isl_pw_multi_aff_free(__isl_take isl_pw_multi_aff *pma)
__isl_export __isl_give isl_pw_aff * isl_pw_aff_add(__isl_take isl_pw_aff *pwaff1, __isl_take isl_pw_aff *pwaff2)
Definition isl_aff.c:3611
__isl_export __isl_give isl_aff * isl_aff_ceil(__isl_take isl_aff *aff)
Definition isl_aff.c:1873
__isl_export __isl_give isl_pw_aff * isl_pw_aff_coalesce(__isl_take isl_pw_aff *pa)
__isl_give isl_pw_multi_aff * isl_pw_multi_aff_copy(__isl_keep isl_pw_multi_aff *pma)
__isl_give isl_pw_aff * isl_pw_multi_aff_get_pw_aff(__isl_keep isl_pw_multi_aff *pma, int pos)
Definition isl_aff.c:6332
__isl_give isl_set * isl_pw_aff_list_le_set(__isl_take isl_pw_aff_list *list1, __isl_take isl_pw_aff_list *list2)
Definition isl_aff.c:3334
__isl_give isl_pw_aff * isl_pw_aff_copy(__isl_keep isl_pw_aff *pwaff)
__isl_give isl_pw_multi_aff * isl_pw_multi_aff_identity(__isl_take isl_space *space)
Definition isl_aff.c:4471
struct isl_multi_aff isl_multi_aff
Definition aff_type.h:29
struct isl_multi_union_pw_aff isl_multi_union_pw_aff
Definition aff_type.h:46
__isl_give isl_ast_node * isl_ast_node_set_annotation(__isl_take isl_ast_node *node, __isl_take isl_id *annotation)
__isl_null isl_ast_expr * isl_ast_expr_free(__isl_take isl_ast_expr *expr)
Definition isl_ast.c:243
__isl_give isl_ast_expr * isl_ast_expr_from_val(__isl_take isl_val *v)
Definition isl_ast.c:589
__isl_null isl_ast_node * isl_ast_node_free(__isl_take isl_ast_node *node)
Definition isl_ast.c:1180
__isl_give isl_ast_expr * isl_ast_expr_copy(__isl_keep isl_ast_expr *expr)
Definition isl_ast.c:195
__isl_give isl_ast_build * isl_ast_build_copy(__isl_keep isl_ast_build *build)
int isl_options_get_ast_build_separation_bounds(isl_ctx *ctx)
int isl_options_get_ast_build_group_coscheduled(isl_ctx *ctx)
__isl_null isl_ast_build * isl_ast_build_free(__isl_take isl_ast_build *build)
int isl_options_get_ast_build_atomic_upper_bound(isl_ctx *ctx)
int isl_options_get_ast_build_scale_strides(isl_ctx *ctx)
isl_ctx * isl_ast_build_get_ctx(__isl_keep isl_ast_build *build)
int isl_options_get_ast_build_exploit_nested_bounds(isl_ctx *ctx)
#define ISL_AST_BUILD_SEPARATION_BOUNDS_EXPLICIT
Definition ast_build.h:32
isl_ast_loop_type
Definition ast_type.h:91
@ isl_ast_loop_atomic
Definition ast_type.h:94
@ isl_ast_loop_separate
Definition ast_type.h:96
@ isl_ast_loop_unroll
Definition ast_type.h:95
isl_ast_expr_op_type
Definition ast_type.h:16
@ isl_ast_expr_op_min
Definition ast_type.h:23
@ isl_ast_expr_op_le
Definition ast_type.h:36
@ isl_ast_expr_op_lt
Definition ast_type.h:37
@ isl_ast_expr_op_max
Definition ast_type.h:22
isl_size isl_constraint_dim(__isl_keep isl_constraint *constraint, enum isl_dim_type type)
__isl_give isl_map * isl_map_add_constraint(__isl_take isl_map *map, __isl_take isl_constraint *constraint)
__isl_give isl_constraint * isl_constraint_set_constant_val(__isl_take isl_constraint *constraint, __isl_take isl_val *v)
__isl_give isl_aff * isl_constraint_get_bound(__isl_keep isl_constraint *constraint, enum isl_dim_type type, int pos)
isl_bool isl_constraint_is_upper_bound(__isl_keep isl_constraint *constraint, enum isl_dim_type type, unsigned pos)
__isl_null isl_constraint * isl_constraint_free(__isl_take isl_constraint *c)
__isl_give isl_constraint * isl_constraint_copy(__isl_keep isl_constraint *c)
__isl_give isl_constraint * isl_constraint_alloc_equality(__isl_take isl_local_space *ls)
__isl_give isl_constraint * isl_equality_from_aff(__isl_take isl_aff *aff)
isl_bool isl_constraint_involves_dims(__isl_keep isl_constraint *constraint, enum isl_dim_type type, unsigned first, unsigned n)
__isl_give isl_basic_set * isl_basic_set_add_constraint(__isl_take isl_basic_set *bset, __isl_take isl_constraint *constraint)
__isl_give isl_constraint_list * isl_basic_set_get_constraint_list(__isl_keep isl_basic_set *bset)
isl_bool isl_constraint_is_lower_bound(__isl_keep isl_constraint *constraint, enum isl_dim_type type, unsigned pos)
isl_stat isl_basic_set_foreach_constraint(__isl_keep isl_basic_set *bset, isl_stat(*fn)(__isl_take isl_constraint *c, void *user), void *user)
__isl_give isl_basic_set * isl_basic_set_from_constraint(__isl_take isl_constraint *constraint)
__isl_give isl_val * isl_constraint_get_coefficient_val(__isl_keep isl_constraint *constraint, enum isl_dim_type type, int pos)
__isl_give isl_set * isl_set_add_constraint(__isl_take isl_set *set, __isl_take isl_constraint *constraint)
isl_stat isl_basic_map_foreach_constraint(__isl_keep isl_basic_map *bmap, isl_stat(*fn)(__isl_take isl_constraint *c, void *user), void *user)
__isl_give isl_constraint * isl_constraint_set_coefficient_si(__isl_take isl_constraint *constraint, enum isl_dim_type type, int pos, int v)
#define __isl_take
Definition ctx.h:23
isl_stat
Definition ctx.h:85
@ isl_stat_error
Definition ctx.h:86
@ isl_stat_ok
Definition ctx.h:87
#define __isl_give
Definition ctx.h:20
#define isl_die(ctx, errno, msg, code)
Definition ctx.h:139
isl_bool isl_bool_ok(int b)
Definition isl_ctx.c:58
@ isl_error_unsupported
Definition ctx.h:83
@ isl_error_invalid
Definition ctx.h:81
@ isl_error_internal
Definition ctx.h:80
#define isl_calloc_array(ctx, type, n)
Definition ctx.h:134
#define __isl_keep
Definition ctx.h:26
int isl_size
Definition ctx.h:98
isl_stat isl_stat_non_error_bool(isl_bool b)
Definition isl_ctx.c:22
isl_bool isl_bool_not(isl_bool b)
Definition isl_ctx.c:44
isl_bool
Definition ctx.h:90
@ isl_bool_false
Definition ctx.h:92
@ isl_bool_true
Definition ctx.h:93
@ isl_bool_error
Definition ctx.h:91
m
Definition guard1-0.c:2
__isl_export __isl_give ISL_HMAP __isl_take ISL_KEY __isl_take ISL_VAL * val
Definition hmap.h:32
isl_stat isl_stat(* fn)(__isl_take ISL_KEY *key, __isl_take ISL_VAL *val, void *user)
Definition hmap.h:37
isl_stat isl_stat void * user
Definition hmap.h:39
isl_bool isl_bool(* test)(__isl_keep ISL_KEY *key, __isl_keep ISL_VAL *val, void *user)
Definition hmap.h:41
__isl_null isl_id * isl_id_free(__isl_take isl_id *id)
Definition isl_id.c:207
__isl_export __isl_give isl_val * isl_set_max_val(__isl_keep isl_set *set, __isl_keep isl_aff *obj)
Definition isl_ilp.c:612
int GMPQAPI cmp(mp_rat op1, mp_rat op2)
void GMPZAPI neg(mp_int rop, mp_int op)
void GMPZAPI gcd(mp_int rop, mp_int op1, mp_int op2)
void GMPQAPI init(mp_rat x)
__isl_give isl_ast_node * isl_ast_node_for_mark_degenerate(__isl_take isl_ast_node *node)
Definition isl_ast.c:1358
__isl_give isl_ast_expr * isl_ast_expr_op_add_arg(__isl_take isl_ast_expr *expr, __isl_take isl_ast_expr *arg)
Definition isl_ast.c:448
__isl_give isl_ast_expr * isl_ast_expr_alloc_int_si(isl_ctx *ctx, int i)
Definition isl_ast.c:568
__isl_give isl_ast_node * isl_ast_node_alloc_for(__isl_take isl_id *id)
Definition isl_ast.c:955
__isl_give isl_ast_expr * isl_ast_expr_alloc_binary(enum isl_ast_expr_op_type type, __isl_take isl_ast_expr *expr1, __isl_take isl_ast_expr *expr2)
Definition isl_ast.c:666
__isl_give isl_ast_expr * isl_ast_expr_alloc_op(isl_ctx *ctx, enum isl_ast_expr_op_type op, int n_arg)
Definition isl_ast.c:186
__isl_give isl_ast_build * isl_ast_build_increase_depth(__isl_take isl_ast_build *build)
int isl_ast_build_has_value(__isl_keep isl_ast_build *build)
__isl_give isl_set * isl_ast_build_eliminate_inner(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_aff * isl_ast_build_get_offset(__isl_keep isl_ast_build *build, int pos)
enum isl_ast_loop_type isl_ast_build_get_loop_type(__isl_keep isl_ast_build *build, int isolated)
__isl_give isl_map * isl_ast_build_map_to_iterator(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
isl_bool isl_ast_build_has_stride(__isl_keep isl_ast_build *build)
__isl_give isl_set * isl_ast_build_specialize(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_basic_set * isl_ast_build_compute_gist_basic_set(__isl_keep isl_ast_build *build, __isl_take isl_basic_set *bset)
__isl_give isl_ast_build * isl_ast_build_align_params(__isl_take isl_ast_build *build, __isl_take isl_space *model)
__isl_give isl_ast_build * isl_ast_build_extract_isolated(__isl_take isl_ast_build *build)
__isl_give isl_set * isl_ast_build_eliminate_divs(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_ast_build * isl_ast_build_scale_down(__isl_take isl_ast_build *build, __isl_take isl_val *m, __isl_take isl_union_map *umap)
__isl_give isl_pw_aff * isl_ast_build_compute_gist_pw_aff(__isl_keep isl_ast_build *build, __isl_take isl_pw_aff *pa)
__isl_give isl_ast_build * isl_ast_build_detect_strides(__isl_take isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_ast_build * isl_ast_build_reset_schedule_node(__isl_take isl_ast_build *build)
__isl_give isl_ast_build * isl_ast_build_clear_local_info(__isl_take isl_ast_build *build)
__isl_give isl_set * isl_ast_build_compute_gist(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_set * isl_ast_build_get_generated(__isl_keep isl_ast_build *build)
__isl_give isl_aff * isl_ast_build_compute_gist_aff(__isl_keep isl_ast_build *build, __isl_take isl_aff *aff)
__isl_give isl_map * isl_ast_build_get_schedule_map(__isl_keep isl_ast_build *build)
__isl_give isl_union_map * isl_ast_build_substitute_values_union_map_domain(__isl_keep isl_ast_build *build, __isl_take isl_union_map *umap)
int isl_ast_build_has_schedule_node(__isl_keep isl_ast_build *build)
__isl_give isl_map * isl_ast_build_get_separation_class(__isl_keep isl_ast_build *build)
__isl_give isl_ast_build * isl_ast_build_replace_pending_by_guard(__isl_take isl_ast_build *build, __isl_take isl_set *guard)
__isl_give isl_space * isl_ast_build_get_space(__isl_keep isl_ast_build *build, int internal)
int isl_ast_build_has_isolated(__isl_keep isl_ast_build *build)
__isl_give isl_set * isl_ast_build_get_stride_constraint(__isl_keep isl_ast_build *build)
__isl_give isl_val * isl_ast_build_get_stride(__isl_keep isl_ast_build *build, int pos)
__isl_give isl_ast_build * isl_ast_build_set_executed(__isl_take isl_ast_build *build, __isl_take isl_union_map *executed)
__isl_give isl_multi_aff * isl_ast_build_get_stride_expansion(__isl_keep isl_ast_build *build)
isl_size isl_ast_build_get_depth(__isl_keep isl_ast_build *build)
__isl_give isl_set * isl_ast_build_get_domain(__isl_keep isl_ast_build *build)
__isl_give isl_ast_build * isl_ast_build_set_loop_bounds(__isl_take isl_ast_build *build, __isl_take isl_basic_set *bounds)
__isl_give isl_ast_build * isl_ast_build_restrict_generated(__isl_take isl_ast_build *build, __isl_take isl_set *set)
__isl_give isl_set * isl_ast_build_get_option_domain(__isl_keep isl_ast_build *build, enum isl_ast_loop_type type)
__isl_give isl_id * isl_ast_build_get_iterator_id(__isl_keep isl_ast_build *build, int pos)
__isl_give isl_ast_build * isl_ast_build_set_pending_generated(__isl_take isl_ast_build *build, __isl_take isl_basic_set *bounds)
__isl_give isl_set * isl_ast_build_get_isolated(__isl_keep isl_ast_build *build)
__isl_give isl_schedule_node * isl_ast_build_get_schedule_node(__isl_keep isl_ast_build *build)
int isl_ast_build_options_involve_depth(__isl_keep isl_ast_build *build)
isl_size isl_ast_build_dim(__isl_keep isl_ast_build *build, enum isl_dim_type type)
__isl_give isl_ast_build * isl_ast_build_product(__isl_take isl_ast_build *build, __isl_take isl_space *space)
__isl_give isl_ast_build * isl_ast_build_insert_dim(__isl_take isl_ast_build *build, int pos)
isl_bool isl_ast_build_has_affine_value(__isl_keep isl_ast_build *build, int pos)
__isl_give isl_set * isl_ast_build_get_pending(__isl_keep isl_ast_build *build)
__isl_give isl_set * isl_ast_build_eliminate(__isl_keep isl_ast_build *build, __isl_take isl_set *domain)
__isl_give isl_ast_build * isl_ast_build_set_schedule_node(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node)
__isl_give isl_basic_set * isl_ast_build_specialize_basic_set(__isl_keep isl_ast_build *build, __isl_take isl_basic_set *bset)
__isl_give isl_multi_aff * isl_ast_build_get_internal2input(__isl_keep isl_ast_build *build)
__isl_give isl_ast_expr * isl_ast_build_expr_from_pw_aff_internal(__isl_keep isl_ast_build *build, __isl_take isl_pw_aff *pa)
static int cmp_constraint(__isl_keep isl_constraint *a, __isl_keep isl_constraint *b, void *user)
__isl_give isl_ast_expr * isl_ast_build_expr_from_set_internal(__isl_keep isl_ast_build *build, __isl_take isl_set *set)
static isl_bool after_in_set(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static isl_bool domain_follows_at_depth(__isl_keep isl_basic_set *i, __isl_keep isl_basic_set *j, void *user)
static __isl_give isl_ast_graft * set_for_cond_from_set(__isl_take isl_ast_graft *graft, __isl_keep isl_set *set, __isl_keep isl_ast_build *build)
static __isl_give isl_set * isl_set_coalesce_preserve(__isl_take isl_set *set)
static isl_stat check_band_schedule_total_on_instances(__isl_keep isl_multi_union_pw_aff *partial, __isl_keep isl_union_map *executed)
__isl_give isl_ast_node * isl_ast_build_node_from_schedule(__isl_keep isl_ast_build *build, __isl_take isl_schedule *schedule)
static __isl_give isl_ast_graft_list * generate_sorted_domains(__isl_keep isl_basic_set_list *domain_list, __isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_shifted_component_tree_unroll(__isl_take isl_union_map *executed, __isl_take isl_set *domain, __isl_take isl_ast_build *build)
static isl_stat count_constraints(__isl_take isl_constraint *c, void *user)
static void isl_split_fixed_free(struct isl_split_fixed_data *split)
static isl_bool has_pure_outer_disjunction(__isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static __isl_give isl_union_map * plug_in_values(__isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft * refine_generic_split(__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *list, __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_shifted_component_tree_base(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build, int isolated)
static __isl_give isl_ast_graft_list * build_ast_from_filter(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static __isl_give isl_ast_graft_list * build_ast_from_mark(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static isl_stat add_domain(__isl_take isl_map *executed, struct isl_generate_domain_data *data)
static __isl_give isl_ast_graft_list * build_ast_from_leaf(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static isl_stat generate_code_in_space(struct isl_generate_code_data *data, __isl_take isl_set *set, __isl_take isl_space *space)
static __isl_give isl_pw_aff_list * upper_bounds(__isl_keep isl_constraint_list *constraints, int pos, __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static __isl_give isl_union_map * internal_executed(__isl_take isl_union_map *executed, __isl_keep isl_space *space, __isl_keep isl_ast_build *build)
static __isl_give isl_aff * find_unroll_lower_bound(__isl_keep isl_ast_build *build, __isl_keep isl_set *domain, int depth, __isl_keep isl_basic_map *expansion, int *n)
static __isl_give isl_ast_graft * at_each_domain(__isl_take isl_ast_graft *graft, __isl_keep isl_map *executed, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * build_ast_from_extension(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static isl_stat generate_sorted_domains_wrap(__isl_take isl_basic_set_list *scc, void *user)
static int has_anchored_subtree(__isl_keep isl_ast_build *build)
static isl_stat update_n_div(__isl_take isl_set *set, __isl_take isl_multi_aff *ma, void *user)
__isl_give isl_ast_node * isl_ast_build_ast_from_schedule(__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
static isl_bool shared_outer(__isl_keep isl_basic_set *i, __isl_keep isl_basic_set *j, void *user)
static __isl_give isl_ast_graft_list * build_ast_from_context(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static int foreach_iteration(__isl_take isl_set *domain, __isl_keep isl_ast_build *build, int(*init)(int n, void *user), int(*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
static __isl_give isl_pw_aff * exact_bound(__isl_keep isl_set *domain, __isl_keep isl_ast_build *build, int upper)
static __isl_give isl_set * compute_domains_eliminate(__isl_keep isl_ast_build *build, __isl_take isl_set *domain)
static __isl_give isl_ast_graft_list * generate_parallel_domains(__isl_keep isl_basic_set_list *domain_list, __isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_shifted_component_tree_separate(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static isl_bool after_in_tree(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_basic_set_list * isl_basic_set_list_from_set(__isl_take isl_set *set)
static isl_stat generate_code_set(__isl_take isl_set *set, void *user)
static __isl_give isl_ast_graft_list * generate_shifted_component_only_after(__isl_take isl_union_map *executed, __isl_take isl_set *isolated, __isl_take isl_set *after, __isl_take isl_ast_build *build, __isl_take isl_set *dead1, __isl_take isl_set *dead2)
static __isl_give isl_ast_node * before_each_for(__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build)
static int constraint_type(isl_constraint *c, int pos)
static __isl_give isl_set * extract_disjunction(__isl_take isl_set *domain, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_next_level(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static __isl_give isl_ast_graft * set_for_node_expressions(__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower, int use_list, __isl_keep isl_pw_aff_list *upper_list, __isl_keep isl_set *upper_set, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_node * build_ast_from_domain(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node)
static isl_stat generate_domain(__isl_take isl_map *executed, void *user)
static __isl_give isl_ast_graft * after_each_for(__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
static int use_upper_bound_list(isl_ctx *ctx, int n_upper, __isl_keep isl_set *domain, int depth)
static int reduce_list_cmp(__isl_keep isl_pw_aff *a, __isl_keep isl_pw_aff *b, void *user)
static __isl_give isl_set * explicit_bounds(__isl_take isl_map *map, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_code(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build, int internal)
static isl_stat basic_map_check_scaled(__isl_take isl_basic_map *bmap, void *user)
static __isl_give isl_ast_graft_list * build_ast_from_schedule_node(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static void isl_split_fixed_init(struct isl_split_fixed_data *split, __isl_keep isl_ast_build *build, __isl_take isl_space *space)
static int do_unroll_tree_iteration(__isl_take isl_basic_set *bset, void *user)
static __isl_give isl_ast_graft * set_for_cond_from_list(__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
static isl_bool aff_constant_is_negative(__isl_keep isl_set *set, __isl_keep isl_aff *aff, void *user)
static __isl_give isl_set * split_off_fixed_non_strided(__isl_keep isl_ast_build *build, __isl_take isl_set *domain)
static int cmp_constraint(__isl_keep isl_constraint *a, __isl_keep isl_constraint *b, void *user)
static isl_stat compute_class_domains(__isl_take isl_point *pnt, void *user)
static __isl_give isl_ast_graft * create_node(__isl_take isl_union_map *executed, __isl_take isl_basic_set *bounds, __isl_take isl_set *domain, __isl_take isl_ast_build *build)
static __isl_give isl_basic_set * extract_shared_enforced(__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
static int is_better_lower_bound(struct isl_find_unroll_data *data, __isl_keep isl_aff *lower, __isl_keep isl_val *n)
__isl_give isl_ast_node * isl_ast_build_node_from_schedule_map(__isl_keep isl_ast_build *build, __isl_take isl_union_map *schedule)
static __isl_give isl_ast_graft_list * generate_shifted_component_flat(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static isl_bool after_in_filter(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_union_set * child_filter(__isl_keep isl_schedule_node *node, int i)
static isl_stat update_unrolling_lower_bound(struct isl_find_unroll_data *data, __isl_keep isl_constraint *c)
static int dim_is_fixed(__isl_keep isl_set *set, int pos)
static __isl_give isl_pw_aff_list * lower_bounds(__isl_keep isl_constraint_list *constraints, int pos, __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * build_ast_from_child(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static __isl_give isl_ast_graft * set_enforced_from_list(__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower, __isl_keep isl_pw_aff_list *upper)
static int init_unroll_tree(int n, void *user)
static __isl_give isl_ast_graft * after_each_mark(__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build)
static isl_bool pw_aff_constant_is_negative(__isl_keep isl_pw_aff *pa, void *user)
static isl_stat generate_non_single_valued(__isl_take isl_map *executed, struct isl_generate_domain_data *data)
static __isl_give isl_ast_graft_list * add_node(__isl_take isl_ast_graft_list *list, __isl_take isl_union_map *executed, __isl_take isl_basic_set *bounds, __isl_take isl_ast_build *build)
static int do_unroll_iteration(__isl_take isl_basic_set *bset, void *user)
static __isl_give isl_pw_aff_list * remove_redundant_lower_bounds(__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_components(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static int eliminate_non_fixed(struct isl_set_map_pair *domain, int *order, int n, int depth, __isl_keep isl_ast_build *build)
static __isl_give isl_constraint * at_offset(int depth, __isl_keep isl_aff *aff, int offset)
static isl_bool after_in_extension(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static isl_stat separate_domain(__isl_take isl_map *map, void *user)
static isl_stat collect_basic_set(__isl_take isl_basic_set *bset, void *user)
static __isl_give isl_ast_graft * set_enforced_from_set(__isl_take isl_ast_graft *graft, __isl_keep isl_pw_aff_list *lower, int pos, __isl_keep isl_set *upper)
static __isl_give isl_set * extract_pending(__isl_keep isl_ast_build *build, __isl_keep isl_basic_set *enforced)
static __isl_give isl_set * compute_atomic_domain(struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
static isl_stat split_not_strided(__isl_take isl_basic_set *bset, void *user)
static __isl_give isl_ast_graft_list * generate_shift_component(struct isl_set_map_pair *domain, int *order, int n, __isl_keep isl_val *stride, __isl_keep isl_multi_val *offset, __isl_take isl_ast_build *build)
static __isl_give isl_union_map * construct_shifted_executed(struct isl_set_map_pair *domain, int *order, int n, __isl_keep isl_val *stride, __isl_keep isl_multi_val *offset, __isl_keep isl_ast_build *build)
static isl_bool after_in_context(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_ast_graft * create_node_scaled(__isl_take isl_union_map *executed, __isl_take isl_basic_set *bounds, __isl_take isl_set *domain, __isl_take isl_ast_build *build)
static __isl_give isl_set * separate_schedule_domains(__isl_take isl_space *space, __isl_take isl_union_map *executed, __isl_keep isl_ast_build *build)
static isl_bool after_in_child(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_ast_node * create_for(__isl_keep isl_ast_build *build, int degenerate)
static __isl_give isl_ast_graft_list * hoist_out_of_context(__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
static __isl_give isl_ast_graft_list * build_ast_from_band(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static isl_bool only_intersects_first(__isl_keep isl_set *set, __isl_keep isl_set *first, __isl_keep isl_set *second)
static __isl_give isl_ast_graft_list * generate_shifted_component(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static int list_constant_is_negative(__isl_keep isl_pw_aff_list *list)
static __isl_give isl_ast_expr * reduce_list(enum isl_ast_expr_op_type type, __isl_keep isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * build_ast_from_sequence(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static int first_offset(struct isl_set_map_pair *domain, int *order, int n, __isl_keep isl_ast_build *build)
static isl_stat map_check_scaled(__isl_take isl_map *map, void *user)
static __isl_give isl_ast_graft_list * generate_component(struct isl_set_map_pair *domain, int *order, int n, __isl_take isl_ast_build *build)
static isl_bool after_in_sequence(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_basic_set_list * compute_domains(__isl_keep isl_union_map *executed, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * build_ast_from_guard(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static __isl_give isl_ast_expr * for_inc(__isl_keep isl_ast_build *build)
static __isl_give isl_set * do_unroll(struct isl_codegen_domains *domains, __isl_take isl_set *domain, __isl_take isl_set *class_domain)
static __isl_give isl_set * intersect_constraints(__isl_keep isl_constraint_list *list)
static __isl_give isl_ast_graft_list * generate_shifted_component_tree_part(__isl_keep isl_union_map *executed, __isl_take isl_set *domain, __isl_keep isl_ast_build *build, int isolated)
static isl_stat extract_domain(__isl_take isl_map *map, void *user)
static void compute_domains_init_options(isl_set *option[4], __isl_keep isl_ast_build *build)
static isl_stat add_nodes(__isl_take isl_basic_set_list *scc, void *user)
static __isl_give isl_ast_graft_list * list_add_guard(__isl_take isl_ast_graft_list *list, __isl_keep isl_set *guard, __isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
static int at_most_one_non_fixed(struct isl_set_map_pair *domain, int *order, int n, int depth)
static __isl_give isl_ast_graft * refine_degenerate(__isl_take isl_ast_graft *graft, __isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
static __isl_give isl_ast_graft * refine_generic_bounds(__isl_take isl_ast_graft *graft, __isl_take isl_constraint_list *c_lower, __isl_take isl_constraint_list *c_upper, __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static int compute_separate_domain(struct isl_codegen_domains *domains, __isl_keep isl_set *class_domain)
static int get_expanded_n_div(struct isl_find_unroll_data *data, __isl_keep isl_aff *lower)
static isl_stat split_fixed(__isl_take isl_basic_set *bset, void *user)
static __isl_give isl_set * add_implied_guards(__isl_take isl_set *guard, int degenerate, __isl_keep isl_basic_set *bounds, __isl_keep isl_ast_build *build)
static isl_stat compute_partial_domains(struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
static __isl_give isl_ast_graft_list * generate_inner_level(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static __isl_give isl_ast_graft_list * build_ast_from_expansion(__isl_take isl_ast_build *build, __isl_take isl_schedule_node *node, __isl_take isl_union_map *executed)
static __isl_give isl_pw_aff_list * list_add_one(__isl_take isl_pw_aff_list *list, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * call_create_leaf(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static __isl_give isl_set * implicit_bounds(__isl_take isl_map *map, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_shifted_component_tree(__isl_take isl_union_map *executed, __isl_take isl_ast_build *build)
static isl_bool after_in_expansion(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static isl_bool isl_split_fixed_need_split(struct isl_split_fixed_data *split, __isl_keep isl_set *domain)
static __isl_give isl_aff * lower_bound(__isl_keep isl_constraint *c, int pos, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft * refine_generic(__isl_take isl_ast_graft *graft, __isl_keep isl_basic_set *bounds, __isl_keep isl_set *domain, __isl_keep isl_ast_build *build)
static isl_stat constraint_check_scaled(__isl_take isl_constraint *c, void *user)
static isl_stat constraint_find_unroll(__isl_take isl_constraint *c, void *user)
static __isl_give isl_basic_set_list * add_split_on(__isl_take isl_basic_set_list *list, __isl_take isl_basic_set *bset, __isl_keep isl_basic_map *gt)
static isl_bool after_in_band(__isl_keep isl_union_map *umap, __isl_keep isl_schedule_node *node)
static __isl_give isl_set * compute_unroll_domains(struct isl_codegen_domains *domains, __isl_take isl_set *class_domain)
static isl_bool any_scheduled_after(int i, int j, void *user)
static __isl_give isl_union_map * construct_component_executed(struct isl_set_map_pair *domain, int *order, int n)
static __isl_give isl_ast_graft_list * generate_shifted_component_from_list(struct isl_set_map_pair *domain, int *order, int n, __isl_take isl_ast_build *build)
static isl_bool after_in_subtree(__isl_keep isl_ast_build *build, __isl_keep isl_map *map1, __isl_keep isl_map *map2)
static isl_stat before_each_mark(__isl_keep isl_id *mark, __isl_keep isl_ast_build *build)
static __isl_give isl_ast_graft_list * generate_shifted_component_parts(__isl_take isl_union_map *executed, __isl_take isl_set *before, __isl_take isl_set *isolated, __isl_take isl_set *after, __isl_take isl_set *other, __isl_take isl_ast_build *build)
__isl_give isl_set * isl_ast_graft_list_extract_hoistable_guard(__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft * isl_ast_graft_insert_mark(__isl_take isl_ast_graft *graft, __isl_take isl_id *mark)
__isl_give isl_ast_graft_list * isl_ast_graft_list_preimage_multi_aff(__isl_take isl_ast_graft_list *list, __isl_take isl_multi_aff *ma)
__isl_give isl_ast_graft_list * isl_ast_graft_list_merge(__isl_take isl_ast_graft_list *list1, __isl_take isl_ast_graft_list *list2, __isl_keep isl_ast_build *build)
__isl_give isl_ast_node * isl_ast_node_from_graft_list(__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft_list * isl_ast_graft_list_group_on_guard(__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_null isl_ast_graft * isl_ast_graft_free(__isl_take isl_ast_graft *graft)
__isl_give isl_ast_graft * isl_ast_graft_alloc_from_children(__isl_take isl_ast_graft_list *list, __isl_take isl_set *guard, __isl_take isl_basic_set *enforced, __isl_keep isl_ast_build *build, __isl_keep isl_ast_build *sub_build)
__isl_give isl_ast_graft_list * isl_ast_graft_list_gist_guards(__isl_take isl_ast_graft_list *list, __isl_take isl_set *context)
__isl_give isl_basic_set * isl_ast_graft_list_extract_shared_enforced(__isl_keep isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft * isl_ast_graft_insert_for(__isl_take isl_ast_graft *graft, __isl_take isl_ast_node *node)
__isl_give isl_ast_graft_list * isl_ast_graft_list_sort_guard(__isl_take isl_ast_graft_list *list)
__isl_give isl_ast_graft * isl_ast_graft_enforce(__isl_take isl_ast_graft *graft, __isl_take isl_basic_set *enforced)
__isl_give isl_ast_graft_list * isl_ast_graft_list_insert_pending_guard_nodes(__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft * isl_ast_graft_add_guard(__isl_take isl_ast_graft *graft, __isl_take isl_set *guard, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft_list * isl_ast_graft_list_fuse(__isl_take isl_ast_graft_list *list, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft_list * isl_ast_graft_list_unembed(__isl_take isl_ast_graft_list *list, int product)
isl_ctx * isl_ast_graft_get_ctx(__isl_keep isl_ast_graft *graft)
__isl_give isl_ast_graft * isl_ast_graft_alloc(__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build)
__isl_give isl_ast_graft * isl_ast_graft_alloc_domain(__isl_take isl_map *executed, __isl_keep isl_ast_build *build)
__isl_give isl_ast_node * isl_ast_node_for_set_init(__isl_take isl_ast_node *node, __isl_take isl_ast_expr *init)
__isl_give isl_ast_node * isl_ast_node_for_set_inc(__isl_take isl_ast_node *node, __isl_take isl_ast_expr *init)
__isl_give isl_ast_node * isl_ast_node_for_set_cond(__isl_take isl_ast_node *node, __isl_take isl_ast_expr *init)
static unsigned offset(__isl_keep isl_constraint *c, enum isl_dim_type type)
static int before(void *first, void *second)
Definition isl_flow.c:2419
static unsigned pos(__isl_keep isl_space *space, enum isl_dim_type type)
Definition isl_map.c:73
#define isl_basic_set_list
#define isl_set
#define isl_basic_set
static struct isl_arg_choice bound[]
Definition isl_options.c:39
enum isl_schedule_node_type isl_schedule_node_get_type(__isl_keep isl_schedule_node *node)
static isl_bool match(__isl_keep isl_space *space1, enum isl_dim_type type1, __isl_keep isl_space *space2, enum isl_dim_type type2)
Definition isl_space.c:1145
static __isl_give isl_set * split(__isl_take isl_set *empty, __isl_take isl_set *min_expr, __isl_take isl_mat *cst)
struct isl_tarjan_graph * isl_tarjan_graph_free(struct isl_tarjan_graph *g)
Definition isl_tarjan.c:17
struct isl_tarjan_graph * isl_tarjan_graph_init(isl_ctx *ctx, int len, isl_bool(*follows)(int i, int j, void *user), void *user)
Definition isl_tarjan.c:119
int sv
Definition isl_test.c:3497
enum isl_fold type
Definition isl_test.c:3810
const char * pa
Definition isl_test.c:7116
const char * set
Definition isl_test.c:1364
const char * hull
Definition isl_test.c:1493
const char * ma
Definition isl_test.c:7330
const char * map
Definition isl_test.c:1734
int equal
Definition isl_test.c:7663
const char * pma
Definition isl_test.c:2962
const char * schedule
Definition isl_test.c:10451
const char * context
Definition isl_test.c:1735
const char * map1
Definition isl_test.c:365
const char * aff
Definition isl_test.c:7073
const char * map2
Definition isl_test.c:366
const char * set1
Definition isl_test.c:3998
const char * res
Definition isl_test.c:783
const char * set2
Definition isl_test.c:3999
int subset
Definition isl_test.c:4000
const char * mupa
Definition isl_test.c:7160
const char * f
Definition isl_test.c:8396
const char * id
Definition isl_test.c:7074
static __isl_give isl_map * universe(__isl_take isl_map *map)
static __isl_give isl_space * identity(__isl_take isl_space *space)
#define isl_union_set
t0 *a *b *t *a *b * t
__isl_give isl_local_space * isl_local_space_from_space(__isl_take isl_space *space)
__isl_export __isl_give isl_map * isl_map_domain_product(__isl_take isl_map *map1, __isl_take isl_map *map2)
Definition isl_map.c:11649
__isl_export __isl_give isl_map * isl_map_intersect_range(__isl_take isl_map *map, __isl_take isl_set *set)
Definition isl_map.c:8973
__isl_export isl_bool isl_basic_map_is_empty(__isl_keep isl_basic_map *bmap)
Definition isl_map.c:10044
__isl_give isl_basic_map * isl_basic_map_equate(__isl_take isl_basic_map *bmap, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:13943
__isl_export __isl_give isl_set * isl_map_domain(__isl_take isl_map *bmap)
Definition isl_map.c:8777
isl_bool isl_map_plain_is_single_valued(__isl_keep isl_map *map)
Definition isl_map.c:12613
__isl_export isl_bool isl_map_is_single_valued(__isl_keep isl_map *map)
Definition isl_map.c:12632
__isl_export __isl_give isl_map * isl_map_apply_domain(__isl_take isl_map *map1, __isl_take isl_map *map2)
Definition isl_map.c:9263
__isl_give isl_map * isl_map_eliminate(__isl_take isl_map *map, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:2717
__isl_export isl_bool isl_map_is_empty(__isl_keep isl_map *map)
Definition isl_map.c:9801
__isl_export __isl_give isl_map * isl_map_universe(__isl_take isl_space *space)
Definition isl_map.c:6967
__isl_give isl_map * isl_map_copy(__isl_keep isl_map *map)
Definition isl_map.c:1494
__isl_export __isl_give isl_map * isl_set_identity(__isl_take isl_set *set)
Definition isl_map.c:9563
__isl_give isl_map * isl_map_lex_gt(__isl_take isl_space *set_space)
Definition isl_map.c:6015
__isl_null isl_basic_map * isl_basic_map_free(__isl_take isl_basic_map *bmap)
Definition isl_map.c:1503
__isl_give isl_map * isl_map_from_domain_and_range(__isl_take isl_set *domain, __isl_take isl_set *range)
Definition isl_map.c:6847
__isl_give isl_map * isl_map_drop_constraints_involving_dims(__isl_take isl_map *map, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3727
__isl_give isl_map * isl_map_order_gt(__isl_take isl_map *map, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:14132
__isl_give isl_map * isl_map_fix_val(__isl_take isl_map *map, enum isl_dim_type type, unsigned pos, __isl_take isl_val *v)
Definition isl_map.c:7288
__isl_export __isl_give isl_map * isl_map_intersect_domain(__isl_take isl_map *map, __isl_take isl_set *set)
Definition isl_map.c:9001
__isl_export isl_stat isl_map_foreach_basic_map(__isl_keep isl_map *map, isl_stat(*fn)(__isl_take isl_basic_map *bmap, void *user), void *user)
Definition isl_map.c:11933
__isl_give isl_map * isl_map_remove_divs_involving_dims(__isl_take isl_map *map, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3440
__isl_export __isl_give isl_set * isl_map_deltas(__isl_take isl_map *map)
Definition isl_map.c:9442
__isl_export __isl_give isl_map * isl_map_lexmin(__isl_take isl_map *map)
__isl_export __isl_give isl_pw_multi_aff * isl_map_lexmin_pw_multi_aff(__isl_take isl_map *map)
__isl_give isl_map * isl_map_order_lt(__isl_take isl_map *map, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:14147
isl_size isl_map_dim(__isl_keep isl_map *map, enum isl_dim_type type)
Definition isl_map.c:113
__isl_give isl_basic_map * isl_basic_map_order_ge(__isl_take isl_basic_map *bmap, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:14035
__isl_export __isl_give isl_basic_map * isl_basic_map_intersect_domain(__isl_take isl_basic_map *bmap, __isl_take isl_basic_set *bset)
Definition isl_map.c:4118
__isl_export __isl_give isl_map * isl_map_reverse(__isl_take isl_map *map)
Definition isl_map.c:7801
__isl_export __isl_give isl_pw_multi_aff * isl_map_lexmax_pw_multi_aff(__isl_take isl_map *map)
__isl_give isl_basic_map * isl_basic_map_order_gt(__isl_take isl_basic_map *bmap, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:14117
__isl_give isl_map * isl_map_equate(__isl_take isl_map *map, enum isl_dim_type type1, int pos1, enum isl_dim_type type2, int pos2)
Definition isl_map.c:13957
__isl_export __isl_give isl_map * isl_map_product(__isl_take isl_map *map1, __isl_take isl_map *map2)
Definition isl_map.c:11613
__isl_export __isl_give isl_map * isl_map_coalesce(__isl_take isl_map *map)
__isl_give isl_map * isl_map_identity(__isl_take isl_space *space)
Definition isl_map.c:9558
__isl_give isl_map * isl_map_insert_dims(__isl_take isl_map *map, enum isl_dim_type type, unsigned pos, unsigned n)
Definition isl_map.c:4760
__isl_null isl_map * isl_map_free(__isl_take isl_map *map)
Definition isl_map.c:7040
__isl_constructor __isl_give isl_map * isl_map_from_basic_map(__isl_take isl_basic_map *bmap)
Definition isl_map.c:4037
__isl_export __isl_give isl_basic_map * isl_basic_map_intersect_range(__isl_take isl_basic_map *bmap, __isl_take isl_basic_set *bset)
Definition isl_map.c:4167
__isl_give isl_basic_map * isl_basic_map_copy(__isl_keep isl_basic_map *bmap)
Definition isl_map.c:1479
__isl_give isl_basic_map * isl_basic_map_from_domain_and_range(__isl_take isl_basic_set *domain, __isl_take isl_basic_set *range)
Definition isl_map.c:6841
__isl_give isl_basic_map * isl_basic_map_from_multi_aff(__isl_take isl_multi_aff *maff)
__isl_export __isl_give isl_set * isl_map_range(__isl_take isl_map *map)
Definition isl_map.c:6728
__isl_give isl_basic_set * isl_basic_map_range(__isl_take isl_basic_map *bmap)
Definition isl_map.c:6635
__isl_give isl_basic_map * isl_basic_map_universe(__isl_take isl_space *space)
Definition isl_map.c:6902
__isl_give isl_map * isl_map_from_multi_aff(__isl_take isl_multi_aff *maff)
int isl_options_get_coalesce_preserve_locals(isl_ctx *ctx)
isl_stat isl_options_set_coalesce_preserve_locals(isl_ctx *ctx, int val)
__isl_export __isl_give isl_schedule_node * isl_schedule_get_root(__isl_keep isl_schedule *schedule)
__isl_null isl_schedule * isl_schedule_free(__isl_take isl_schedule *sched)
__isl_export isl_size isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_multi_union_pw_aff * isl_schedule_node_band_get_partial_schedule(__isl_keep isl_schedule_node *node)
__isl_export isl_size isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_union_map * isl_schedule_node_expansion_get_expansion(__isl_keep isl_schedule_node *node)
__isl_export isl_size isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_union_map * isl_schedule_node_extension_get_extension(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_union_set * isl_schedule_node_domain_get_domain(__isl_keep isl_schedule_node *node)
__isl_give isl_id * isl_schedule_node_mark_get_id(__isl_keep isl_schedule_node *node)
__isl_export isl_bool isl_schedule_node_is_subtree_anchored(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_union_set * isl_schedule_node_filter_get_filter(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_set * isl_schedule_node_context_get_context(__isl_keep isl_schedule_node *node)
__isl_export __isl_give isl_union_map * isl_schedule_node_get_prefix_schedule_union_map(__isl_keep isl_schedule_node *node)
__isl_null isl_schedule_node * isl_schedule_node_free(__isl_take isl_schedule_node *node)
__isl_export __isl_give isl_set * isl_schedule_node_guard_get_guard(__isl_keep isl_schedule_node *node)
isl_ctx * isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
__isl_give isl_schedule_node * isl_schedule_node_get_child(__isl_keep isl_schedule_node *node, int pos)
__isl_export __isl_give isl_schedule_node * isl_schedule_node_child(__isl_take isl_schedule_node *node, int pos)
isl_schedule_node_type
@ isl_schedule_node_mark
@ isl_schedule_node_filter
@ isl_schedule_node_domain
@ isl_schedule_node_band
@ isl_schedule_node_set
@ isl_schedule_node_guard
@ isl_schedule_node_extension
@ isl_schedule_node_expansion
@ isl_schedule_node_sequence
@ isl_schedule_node_error
@ isl_schedule_node_leaf
@ isl_schedule_node_context
a(0)
b(9)
__isl_export __isl_give isl_set * isl_set_universe(__isl_take isl_space *space)
Definition isl_map.c:6985
__isl_export __isl_give isl_basic_set * isl_set_unshifted_simple_hull(__isl_take isl_set *set)
__isl_export __isl_give isl_set * isl_set_coalesce(__isl_take isl_set *set)
__isl_give isl_basic_set * isl_basic_set_drop_constraints_not_involving_dims(__isl_take isl_basic_set *bset, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3655
__isl_export __isl_give isl_set * isl_set_subtract(__isl_take isl_set *set1, __isl_take isl_set *set2)
isl_bool isl_set_dim_has_upper_bound(__isl_keep isl_set *set, enum isl_dim_type type, unsigned pos)
Definition isl_map.c:12251
isl_ctx * isl_set_get_ctx(__isl_keep isl_set *set)
Definition isl_map.c:397
__isl_give isl_set * isl_set_make_disjoint(__isl_take isl_set *set)
__isl_export __isl_give isl_space * isl_set_get_space(__isl_keep isl_set *set)
Definition isl_map.c:604
__isl_export __isl_give isl_set * isl_set_detect_equalities(__isl_take isl_set *set)
__isl_give isl_val * isl_set_plain_get_val_if_fixed(__isl_keep isl_set *set, enum isl_dim_type type, unsigned pos)
Definition isl_map.c:10850
__isl_give isl_space * isl_basic_set_get_space(__isl_keep isl_basic_set *bset)
Definition isl_map.c:422
__isl_constructor __isl_give isl_set * isl_set_from_point(__isl_take isl_point *pnt)
Definition isl_point.c:693
__isl_export __isl_give isl_set * isl_set_union(__isl_take isl_set *set1, __isl_take isl_set *set2)
Definition isl_map.c:8929
__isl_overload __isl_give isl_set * isl_set_preimage_multi_aff(__isl_take isl_set *set, __isl_take isl_multi_aff *ma)
Definition isl_map.c:14675
__isl_give isl_basic_set * isl_basic_set_preimage_multi_aff(__isl_take isl_basic_set *bset, __isl_take isl_multi_aff *ma)
Definition isl_map.c:14536
__isl_give isl_basic_set * isl_basic_set_project_out(__isl_take isl_basic_set *bset, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:5170
__isl_export isl_bool isl_set_is_disjoint(__isl_keep isl_set *set1, __isl_keep isl_set *set2)
__isl_give isl_set * isl_set_eliminate(__isl_take isl_set *set, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:2747
__isl_null isl_basic_set * isl_basic_set_free(__isl_take isl_basic_set *bset)
Definition isl_map.c:1523
__isl_give isl_basic_set * isl_set_simple_hull(__isl_take isl_set *set)
isl_bool isl_basic_set_is_disjoint(__isl_keep isl_basic_set *bset1, __isl_keep isl_basic_set *bset2)
__isl_null isl_set * isl_set_free(__isl_take isl_set *set)
Definition isl_map.c:4055
int isl_set_follows_at(__isl_keep isl_set *set1, __isl_keep isl_set *set2, int pos)
Definition isl_map.c:10684
__isl_overload __isl_give isl_set * isl_set_preimage_pw_multi_aff(__isl_take isl_set *set, __isl_take isl_pw_multi_aff *pma)
Definition isl_map.c:14802
__isl_export isl_bool isl_set_is_subset(__isl_keep isl_set *set1, __isl_keep isl_set *set2)
__isl_give isl_set * isl_set_remove_divs_involving_dims(__isl_take isl_set *set, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3466
__isl_give isl_set * isl_set_drop_constraints_not_involving_dims(__isl_take isl_set *set, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3766
__isl_give isl_set * isl_set_remove_unknown_divs(__isl_take isl_set *set)
Definition isl_map.c:3856
__isl_export __isl_give isl_set * isl_set_apply(__isl_take isl_set *set, __isl_take isl_map *map)
Definition isl_map.c:10489
__isl_give isl_set * isl_set_copy(__isl_keep isl_set *set)
Definition isl_map.c:1470
__isl_give isl_basic_set * isl_basic_set_remove_redundancies(__isl_take isl_basic_set *bset)
__isl_give isl_set * isl_set_project_out(__isl_take isl_set *set, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:5241
__isl_export __isl_give isl_set * isl_set_gist(__isl_take isl_set *set, __isl_take isl_set *context)
__isl_export isl_stat isl_set_foreach_point(__isl_keep isl_set *set, isl_stat(*fn)(__isl_take isl_point *pnt, void *user), void *user)
Definition isl_point.c:579
__isl_give isl_set * isl_set_compute_divs(__isl_take isl_set *set)
Definition isl_map.c:8772
isl_bool isl_set_plain_is_equal(__isl_keep isl_set *set1, __isl_keep isl_set *set2)
Definition isl_map.c:11240
isl_bool isl_set_is_params(__isl_keep isl_set *set)
Definition isl_map.c:1270
__isl_export isl_stat isl_set_foreach_basic_set(__isl_keep isl_set *set, isl_stat(*fn)(__isl_take isl_basic_set *bset, void *user), void *user)
Definition isl_map.c:11948
__isl_export __isl_give isl_basic_set * isl_basic_set_apply(__isl_take isl_basic_set *bset, __isl_take isl_basic_map *bmap)
Definition isl_map.c:5407
__isl_give isl_basic_set * isl_set_plain_unshifted_simple_hull(__isl_take isl_set *set)
isl_size isl_set_dim(__isl_keep isl_set *set, enum isl_dim_type type)
Definition isl_map.c:132
isl_stat isl_set_dim_residue_class_val(__isl_keep isl_set *set, int pos, __isl_give isl_val **modulo, __isl_give isl_val **residue)
__isl_give isl_set * isl_set_align_params(__isl_take isl_set *set, __isl_take isl_space *model)
Definition isl_map.c:13178
__isl_export isl_bool isl_basic_set_is_empty(__isl_keep isl_basic_set *bset)
Definition isl_map.c:10123
__isl_export isl_size isl_set_n_basic_set(__isl_keep isl_set *set)
Definition isl_map.c:11928
__isl_export __isl_give isl_set * isl_set_intersect(__isl_take isl_set *set1, __isl_take isl_set *set2)
Definition isl_map.c:4521
__isl_export __isl_give isl_set * isl_set_empty(__isl_take isl_space *space)
Definition isl_map.c:6962
__isl_constructor __isl_give isl_set * isl_set_from_basic_set(__isl_take isl_basic_set *bset)
Definition isl_map.c:4024
__isl_give isl_basic_set * isl_basic_set_copy(__isl_keep isl_basic_set *bset)
Definition isl_map.c:1465
__isl_export __isl_give isl_basic_set * isl_basic_set_intersect(__isl_take isl_basic_set *bset1, __isl_take isl_basic_set *bset2)
Definition isl_map.c:4312
__isl_give isl_basic_set * isl_basic_set_universe(__isl_take isl_space *space)
Definition isl_map.c:6910
__isl_export isl_bool isl_set_involves_locals(__isl_keep isl_set *set)
Definition isl_map.c:3556
__isl_give isl_set * isl_set_drop_constraints_involving_dims(__isl_take isl_set *set, enum isl_dim_type type, unsigned first, unsigned n)
Definition isl_map.c:3756
__isl_give isl_basic_set * isl_basic_set_remove_unknown_divs(__isl_take isl_basic_set *bset)
Definition isl_map.c:3826
isl_bool isl_set_plain_is_disjoint(__isl_keep isl_set *set1, __isl_keep isl_set *set2)
__isl_export isl_bool isl_set_is_empty(__isl_keep isl_set *set)
Definition isl_map.c:9828
__isl_null isl_space * isl_space_free(__isl_take isl_space *space)
Definition isl_space.c:478
__isl_give isl_space * isl_space_from_domain(__isl_take isl_space *space)
Definition isl_space.c:2242
__isl_give isl_space * isl_space_insert_dims(__isl_take isl_space *space, enum isl_dim_type type, unsigned pos, unsigned n)
Definition isl_space.c:1345
__isl_give isl_space * isl_space_set_from_params(__isl_take isl_space *space)
Definition isl_space.c:2321
__isl_give isl_space * isl_space_copy(__isl_keep isl_space *space)
Definition isl_space.c:469
__isl_give isl_space * isl_space_align_params(__isl_take isl_space *space1, __isl_take isl_space *space2)
Definition isl_space.c:3358
__isl_export __isl_give isl_space * isl_space_range(__isl_take isl_space *space)
Definition isl_space.c:2257
isl_bool isl_space_is_params(__isl_keep isl_space *space)
Definition isl_space.c:211
__isl_export __isl_give isl_space * isl_space_unwrap(__isl_take isl_space *space)
Definition isl_space.c:2951
__isl_give isl_space * isl_space_set_alloc(isl_ctx *ctx, unsigned nparam, unsigned dim)
Definition isl_space.c:188
__isl_export __isl_give isl_space * isl_space_map_from_set(__isl_take isl_space *space)
Definition isl_space.c:1927
isl_size isl_space_dim(__isl_keep isl_space *space, enum isl_dim_type type)
Definition isl_space.c:372
__isl_give isl_space * isl_space_add_dims(__isl_take isl_space *space, enum isl_dim_type type, unsigned n)
Definition isl_space.c:1262
isl_bool isl_space_is_domain(__isl_keep isl_space *space1, __isl_keep isl_space *space2)
Definition isl_space.c:2708
__isl_export __isl_give isl_space * isl_space_domain(__isl_take isl_space *space)
Definition isl_space.c:2232
isl_dim_type
Definition space_type.h:13
@ isl_dim_param
Definition space_type.h:15
@ isl_dim_in
Definition space_type.h:16
@ isl_dim_set
Definition space_type.h:18
@ isl_dim_div
Definition space_type.h:19
@ isl_dim_out
Definition space_type.h:17
isl_ast_graft_list * list
isl_ast_build * build
isl_union_map * executed
struct isl_set_map_pair * domain
isl_union_map * options
__isl_give isl_id *(* before_each_for)(__isl_keep isl_ast_build *context, void *user)
__isl_give isl_ast_node *(* after_each_for)(__isl_take isl_ast_node *node, __isl_keep isl_ast_build *context, void *user)
isl_stat(* before_each_mark)(__isl_keep isl_id *mark, __isl_keep isl_ast_build *build, void *user)
__isl_give isl_ast_node *(* after_each_mark)(__isl_take isl_ast_node *node, __isl_keep isl_ast_build *context, void *user)
isl_multi_aff * internal2input
__isl_give isl_ast_node *(* create_leaf)(__isl_take isl_ast_build *build, void *user)
struct isl_codegen_domains * domains
isl_ast_graft_list * list
isl_ast_build * build
isl_basic_set_list * list
isl_union_map * executed
isl_basic_map * expansion
isl_ast_graft_list * list
isl_ast_graft_list * list
static Signature range
static Signature domain
__isl_null isl_union_map * isl_union_map_free(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_space * isl_union_map_get_space(__isl_keep isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_reverse(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_domain_product(__isl_take isl_union_map *umap1, __isl_take isl_union_map *umap2)
__isl_give isl_union_map * isl_union_map_add_map(__isl_take isl_union_map *umap, __isl_take isl_map *map)
__isl_give isl_union_map * isl_union_map_align_params(__isl_take isl_union_map *umap, __isl_take isl_space *model)
__isl_export isl_stat isl_union_map_foreach_map(__isl_keep isl_union_map *umap, isl_stat(*fn)(__isl_take isl_map *map, void *user), void *user)
__isl_overload __isl_give isl_union_map * isl_union_map_preimage_domain_multi_aff(__isl_take isl_union_map *umap, __isl_take isl_multi_aff *ma)
__isl_give isl_union_set * isl_union_set_align_params(__isl_take isl_union_set *uset, __isl_take isl_space *model)
__isl_export __isl_give isl_union_map * isl_union_map_coalesce(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_apply_range(__isl_take isl_union_map *umap1, __isl_take isl_union_map *umap2)
__isl_give isl_union_map * isl_union_map_copy(__isl_keep isl_union_map *umap)
__isl_export isl_bool isl_union_map_is_empty(__isl_keep isl_union_map *umap)
__isl_export __isl_give isl_union_set * isl_union_map_range(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_apply_domain(__isl_take isl_union_map *umap1, __isl_take isl_union_map *umap2)
__isl_give isl_union_map * isl_union_map_empty(__isl_take isl_space *space)
__isl_constructor __isl_give isl_union_map * isl_union_map_from_map(__isl_take isl_map *map)
__isl_give isl_union_map * isl_union_map_remove_redundancies(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_union(__isl_take isl_union_map *umap1, __isl_take isl_union_map *umap2)
__isl_give isl_union_map * isl_union_map_intersect_domain(__isl_take isl_union_map *umap, __isl_take isl_union_set *uset)
isl_ctx * isl_union_map_get_ctx(__isl_keep isl_union_map *umap)
isl_size isl_union_map_dim(__isl_keep isl_union_map *umap, enum isl_dim_type type)
__isl_export isl_bool isl_union_map_is_subset(__isl_keep isl_union_map *umap1, __isl_keep isl_union_map *umap2)
isl_size isl_union_map_n_map(__isl_keep isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_detect_equalities(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_from_domain_and_range(__isl_take isl_union_set *domain, __isl_take isl_union_set *range)
__isl_export __isl_give isl_union_set * isl_union_map_domain(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_intersect_params(__isl_take isl_union_map *umap, __isl_take isl_set *set)
__isl_export __isl_give isl_union_map * isl_union_map_universe(__isl_take isl_union_map *umap)
__isl_export __isl_give isl_union_map * isl_union_map_intersect(__isl_take isl_union_map *umap1, __isl_take isl_union_map *umap2)
__isl_give isl_union_map * isl_union_map_intersect_range(__isl_take isl_union_map *umap, __isl_take isl_union_set *uset)
__isl_give isl_set * isl_set_from_union_set(__isl_take isl_union_set *uset)
__isl_export isl_stat isl_union_set_foreach_set(__isl_keep isl_union_set *uset, isl_stat(*fn)(__isl_take isl_set *set, void *user), void *user)
__isl_constructor __isl_give isl_union_set * isl_union_set_from_basic_set(__isl_take isl_basic_set *bset)
__isl_export __isl_give isl_space * isl_union_set_get_space(__isl_keep isl_union_set *uset)
__isl_constructor __isl_give isl_union_set * isl_union_set_from_set(__isl_take isl_set *set)
__isl_export __isl_give isl_union_set * isl_union_set_coalesce(__isl_take isl_union_set *uset)
__isl_give isl_union_set * isl_union_set_copy(__isl_keep isl_union_set *uset)
__isl_null isl_union_set * isl_union_set_free(__isl_take isl_union_set *uset)
__isl_export isl_bool isl_union_set_is_subset(__isl_keep isl_union_set *uset1, __isl_keep isl_union_set *uset2)
t1
Definition unroll11.c:2
t2
Definition unroll4.c:3
__isl_export isl_bool isl_val_is_nan(__isl_keep isl_val *v)
Definition isl_val.c:1161
__isl_give isl_multi_val * isl_multi_val_mod_val(__isl_take isl_multi_val *mv, __isl_take isl_val *v)
Definition isl_val.c:1626
__isl_give isl_val * isl_val_copy(__isl_keep isl_val *v)
Definition isl_val.c:219
__isl_export int isl_val_cmp_si(__isl_keep isl_val *v, long i)
Definition isl_val.c:1394
__isl_give isl_val * isl_val_set_si(__isl_take isl_val *v, long i)
Definition isl_val.c:144
__isl_export isl_bool isl_val_is_infty(__isl_keep isl_val *v)
Definition isl_val.c:1171
__isl_export __isl_give isl_val * isl_val_div(__isl_take isl_val *v1, __isl_take isl_val *v2)
Definition isl_val.c:875
__isl_overload __isl_give isl_multi_val * isl_multi_val_add_val(__isl_take isl_multi_val *mv, __isl_take isl_val *v)
Definition isl_val.c:1612
__isl_export __isl_give isl_val * isl_val_gcd(__isl_take isl_val *v1, __isl_take isl_val *v2)
Definition isl_val.c:1016
__isl_export isl_bool isl_val_is_divisible_by(__isl_keep isl_val *v1, __isl_keep isl_val *v2)
Definition isl_val.c:964
__isl_export isl_bool isl_val_is_neg(__isl_keep isl_val *v)
Definition isl_val.c:1234
__isl_null isl_val * isl_val_free(__isl_take isl_val *v)
Definition isl_val.c:263
__isl_export isl_bool isl_val_is_zero(__isl_keep isl_val *v)
Definition isl_val.c:1191
__isl_export isl_bool isl_val_ne(__isl_keep isl_val *v1, __isl_keep isl_val *v2)
Definition isl_val.c:1458
__isl_export isl_bool isl_val_is_one(__isl_keep isl_val *v)
Definition isl_val.c:1201
__isl_export __isl_give isl_val * isl_val_neg(__isl_take isl_val *v)
Definition isl_val.c:410
__isl_export long isl_val_get_num_si(__isl_keep isl_val *v)
Definition isl_val.c:282
struct isl_multi_val isl_multi_val
Definition val_type.h:16
n
Definition youcefn.c:8