aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
schedule.cpp
Go to the documentation of this file.
1/****************************************************************************
2 * This file is part of the aGrUM/pyAgrum library. *
3 * *
4 * Copyright (c) 2005-2025 by *
5 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
6 * - Christophe GONZALES(_at_AMU) *
7 * *
8 * The aGrUM/pyAgrum library is free software; you can redistribute it *
9 * and/or modify it under the terms of either : *
10 * *
11 * - the GNU Lesser General Public License as published by *
12 * the Free Software Foundation, either version 3 of the License, *
13 * or (at your option) any later version, *
14 * - the MIT license (MIT), *
15 * - or both in dual license, as here. *
16 * *
17 * (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) *
18 * *
19 * This aGrUM/pyAgrum library is distributed in the hope that it will be *
20 * useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
21 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS *
22 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR *
26 * OTHER DEALINGS IN THE SOFTWARE. *
27 * *
28 * See LICENCES for more details. *
29 * *
30 * SPDX-FileCopyrightText: Copyright 2005-2025 *
31 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
32 * - Christophe GONZALES(_at_AMU) *
33 * SPDX-License-Identifier: LGPL-3.0-or-later OR MIT *
34 * *
35 * Contact : info_at_agrum_dot_org *
36 * homepage : http://agrum.gitlab.io *
37 * gitlab : https://gitlab.com/agrumery/agrum *
38 * *
39 ****************************************************************************/
40
41
48
50
51#ifndef DOXYGEN_SHOULD_SKIP_THIS
52
54# ifdef GUM_NO_INLINE
56# endif /* GUM_NO_INLINE */
57
58namespace gum {
59
  // the version number of the schedules: a process-wide atomic counter from
  // which _newVersionNumber_() draws a fresh, distinct version for each
  // schedule (atomicity makes concurrent draws safe)
  std::atomic< Idx > Schedule::_overall_version_number_ = Idx(0);
62
64 DAG Schedule::_fullDAG_() const {
65 // if no operation has been performed yet, return _dag_
66 if (_dag_.sizeNodes() == _node2op_.size()) return _dag_;
67
68 // now, we should reconstruct the graph. First, we add all the nodes
69 DAG dag(_node2op_.size(), true, 2 * _node2op_.size(), true); // estimated default size
70 for (auto iter = _node2op_.cbegin(); iter != _node2op_.cend(); ++iter) {
71 dag.addNodes(iter.first());
72 }
73
74 // add the arcs toward node or from node if another node deletes one of its
75 // arguments
76 for (auto iter = _node2op_.cbegin(); iter != _node2op_.cend(); ++iter) {
77 const NodeId node = iter.first();
78 const auto* op = iter.second();
79
80 for (const auto arg: op->args()) {
81 // get the arguments used by node/op that are not sources and add arcs
82 // from the operations/nodes that created them to the current node.
83
84 if (const auto arg_location = _multidim_location_[arg].first; arg_location != nullptr) {
85 const NodeId arg_node = _node2op_.first(arg_location);
86 dag.addArc(arg_node, node);
87 }
88
89 // If there exists another operation that deletes Parameter arg, then we
90 // should add an arc from node to this operation (as node/op should be
91 // executed before the argument is deleted).
92 if (_deleted_multidim2node_.existsFirst(arg)) {
93 const NodeId other_node = _deleted_multidim2node_.second(arg);
94 if (other_node != node) { dag.addArc(node, other_node); }
95 }
96
97 // if node/op deletes its parameters, then we should add arcs from all
98 // the operations that use arg to node: actually, these nodes use arg
99 // but do not delete it, hence they should be executed before node/op
100 if (op->implyDeletion()) {
101 for (const auto other_node: _multidim2nodes_[arg]) {
102 if (other_node != node) { dag.addArc(other_node, node); }
103 }
104 }
105 }
106 }
107
108 return dag;
109 }
110
  /// copies the content of Schedule from into this (which is assumed empty):
  /// operations are cloned in a topological order so that the ScheduleMultiDims
  /// an operation needs always exist before the operation itself is cloned
  void Schedule::_copy_(const Schedule& from) {
    // copy the dag-related structures
    _dag_ = from._dag_;
    _newId_ = from._newId_;
    _version_number_ = from._version_number_;

    // here, we create the mapping from all the ScheduleMultiDims contained into
    // from to those contained into this
    HashTable< const IScheduleMultiDim*, const IScheduleMultiDim* > multidim_from2this(
        from._multidim2id_.size());

    // we copy the source multidims, i.e., those that are not the result of any
    // operation, and we store the mapping from these IScheduleMultiDim contained
    // into from to those contained into this. Emplaced multidims are owned by
    // the user, so they are shared (same pointer) rather than cloned.
    for (const auto& [first, second]: from._multidim_location_) {
      if (second.first == nullptr) {   // here, this is a source
        const IScheduleMultiDim* new_multidim
            = from._emplaced_multidims_.exists(first) ? first : first->clone();
        multidim_from2this.insert(first, new_multidim);
        _multidim_location_.insert(new_multidim, second);
        _multidim2id_.insert(new_multidim, new_multidim->id());
        _multidim2nodes_.insert(new_multidim, NodeSet());
      }
    }
    _emplaced_multidims_ = from._emplaced_multidims_;


    // get a topological order of from's full dag, i.e. the dag corresponding to
    // all the operations of that schedule, as if those had not been executed.
    // This is the order in which we will reconstruct all the operations. This
    // will allow us to construct the ScheduleMultiDims in the right order.
    const Sequence< NodeId > nodes_sequence = (from._dag_.sizeNodes() == from._node2op_.size())
                                                ? from._dag_.topologicalOrder()
                                                : from._fullDAG_().topologicalOrder();

    // we can now create all the operations contained in this in the same
    // topological order as from: this enables to create all the ScheduleMultiDims
    // needed by a ScheduleOperator before creating the ScheduleOperator itself
    for (const auto node: nodes_sequence) {
      // get the "from" operation and clone it
      const ScheduleOperator* from_op = from._node2op_.second(node);
      ScheduleOperator* new_op = from_op->clone();
      _node2op_.insert(node, new_op);

      // we get the ScheduleMultiDims passed as parameters to from_op, and we
      // compute the corresponding ScheduleMultiDims in the current schedule,
      // and we update accordingly the parameters of new_op. In addition,
      // we store the information that the ScheduleMultiDims are used by our
      // new operation
      const Sequence< const IScheduleMultiDim* > from_args = from_op->args();
      Sequence< const IScheduleMultiDim* > new_args(from_args.size());
      for (auto from_arg: from_args) {
        const auto& new_arg = multidim_from2this[from_arg];
        new_args << new_arg;
        _multidim2nodes_[new_arg].insert(node);
      }
      new_op->updateArgs(new_args);

      // if the operation deletes its parameters, then keep this information
      if (new_op->implyDeletion()) {
        for (auto new_arg: new_args) {
          _deleted_multidim2node_.insert(new_arg, node);
        }
      }

      // we get the ScheduleMultiDims resulting from from_op, and we compute their
      // mapping with those resulting from new_op. We also store the location and
      // id of the new results
      const Sequence< const IScheduleMultiDim* > from_res = from_op->results();
      const Sequence< const IScheduleMultiDim* > new_res = new_op->results();
      for (Idx i = Idx(0), end = from_res.size(); i < end; ++i) {
        multidim_from2this.insert(from_res[i], new_res[i]);
        _multidim_location_.insert(new_res[i], std::pair< ScheduleOperator*, Idx >(new_op, i));
        _multidim2id_.insert(new_res[i], new_res[i]->id());
        _multidim2nodes_.insert(new_res[i], NodeSet());
      }
    }
  }
190
192 void Schedule::_destroy_() {
193 // remove all the operations of the schedule
194 for (auto iter = _node2op_.begin(), end = _node2op_.end(); iter != end; ++iter) {
195 delete iter.second();
196 }
197
198 // remove all the source ScheduleMultiDims
199 for (const auto& [first, second]: _multidim_location_) {
200 if ((second.first == nullptr) && !_emplaced_multidims_.exists(first)) {
201 const IScheduleMultiDim* multidim = first;
202 delete multidim;
203 }
204 }
205 }
206
  /// empties the schedule: deallocates its owned objects and resets every
  /// data structure to its initial (empty) state
  void Schedule::clear() {
    // remove all the allocated objects
    _destroy_();

    // clear all the data structures
    _dag_.clear();
    _newId_ = NodeId(0);
    _node2op_.clear();
    _multidim_location_.clear();
    _multidim2id_.clear();
    _emplaced_multidims_.clear();
    _multidim2nodes_.clear();
    // NOTE(review): a _deleted_multidim2node_.clear() statement appears to be
    // missing here (lost in extraction?) — every other container is cleared;
    // confirm against the upstream source

    // indicate that the version of the schedule has changed
    // NOTE(review): the statement actually bumping _version_number_ (cf. the
    // _newVersionNumber_() calls elsewhere in this file) appears to be missing
    // below this comment — confirm against the upstream source
  }
225
  /// default constructor: builds an empty schedule
  /// @param nb_ops an estimation of the number of operations the schedule will
  /// contain, used to presize the internal containers (roughly one multidim
  /// per operation argument/result, hence the 2 * nb_ops sizes)
  Schedule::Schedule(const Size nb_ops) :
      _dag_(nb_ops, true, 2 * nb_ops, true), _node2op_(nb_ops), _multidim_location_(2 * nb_ops),
      _multidim2id_(2 * nb_ops), _emplaced_multidims_(2 * nb_ops), _multidim2nodes_(2 * nb_ops),
      _deleted_multidim2node_(2 * nb_ops),
      // each schedule gets its own distinct version number
      _version_number_(_newVersionNumber_()) {
    // for debugging purposes
    GUM_CONSTRUCTOR(Schedule)
  }
234
  /// copy constructor: deep-copies from (operators and non-emplaced source
  /// multidims are cloned); the copy shares from's version number
  Schedule::Schedule(const Schedule& from) : _version_number_(from._version_number_) {
    // really perform the copy
    _copy_(from);

    // for debugging purposes
    GUM_CONS_CPY(Schedule)
  }
243
  /// move constructor: steals from's content, then leaves from in a valid
  /// empty state (so that from's destructor does not free the stolen objects)
  Schedule::Schedule(Schedule&& from) :
      _dag_(std::move(from._dag_)), _newId_(from._newId_), _node2op_(std::move(from._node2op_)),
      _multidim_location_(std::move(from._multidim_location_)),
      _multidim2id_(std::move(from._multidim2id_)),
      _emplaced_multidims_(std::move(from._emplaced_multidims_)),
      _multidim2nodes_(std::move(from._multidim2nodes_)),
      _deleted_multidim2node_(std::move(from._deleted_multidim2node_)),
      _version_number_(from._version_number_) {
    // empty properly from: moved-from containers are cleared explicitly so
    // that from._destroy_() has nothing left to deallocate
    from._newId_ = NodeId(0);
    from._dag_.clear();
    from._node2op_.clear();
    from._multidim_location_.clear();
    from._multidim2id_.clear();
    from._emplaced_multidims_.clear();
    from._multidim2nodes_.clear();
    from._deleted_multidim2node_.clear();

    // for debugging purposes
    GUM_CONS_MOV(Schedule)
  }
266
  /// destructor: frees every operator and cloned source multidim the schedule
  /// owns (emplaced multidims remain the caller's responsibility)
  Schedule::~Schedule() {
    // really destroy all the allocated objects contained into the schedule
    _destroy_();

    // for debugging purposes
    GUM_DESTRUCTOR(Schedule)
  }
275
277 Schedule& Schedule::operator=(const Schedule& from) {
278 // avoid self assignment
279 if (this != &from) {
280 // remove all the operations that are currently stored
281 clear();
282
283 // perform the copy
284 _copy_(from);
285 }
286
287 return *this;
288 }
289
291 Schedule& Schedule::operator=(Schedule&& from) {
292 // avoid self assignment
293 if (this != &from) {
294 // remove all the operations that are currently stored
295 clear();
296
297 // move from
298 _newId_ = from._newId_;
299 _dag_ = std::move(from._dag_);
300 _node2op_ = std::move(from._node2op_);
301 _multidim_location_ = std::move(from._multidim_location_);
302 _multidim2id_ = std::move(from._multidim2id_);
303 _emplaced_multidims_ = std::move(from._emplaced_multidims_);
304 _multidim2nodes_ = std::move(from._multidim2nodes_);
305 _deleted_multidim2node_ = std::move(from._deleted_multidim2node_);
306 _version_number_ = from._version_number_;
307
308 // empty properly from
309 from._newId_ = NodeId(0);
310 from._dag_.clear();
311 from._node2op_.clear();
312 from._multidim_location_.clear();
313 from._multidim2id_.clear();
314 from._emplaced_multidims_.clear();
315 from._multidim2nodes_.clear();
316 from._deleted_multidim2node_.clear();
317 }
318
319 return *this;
320 }
321
  /// checks whether two schedules are equal: same DAG, same operation ids,
  /// same operators with equivalent parameters (source multidims are compared
  /// by content, operation results by the identity of the producing operation)
  bool Schedule::operator==(const Schedule& from) const {
    // check if both schedules have the same graph and the same set of
    // operation ids
    if ((_dag_ != from._dag_) || (_node2op_.size() != from._node2op_.size())) return false;
    for (auto iter = _node2op_.cbegin(); iter != _node2op_.cend(); ++iter)
      if (!from._node2op_.existsFirst(iter.first())) return false;

    // map "this"'s operations and source multidims to those of "from"
    HashTable< const ScheduleOperator*, const ScheduleOperator* > this_op2from(_node2op_.size());
    Bijection< const IScheduleMultiDim*, const IScheduleMultiDim* > this_multidim2from(
        _multidim2nodes_.size());

    // get a topological order of the full graph. We will use it to compare the
    // parameters of the operations. This order enforces that the parameters are
    // already known before we examine them in the operations.
    // check if all the operations have the same type and the same parameters
    // (C++20 range-for with init-statement: "order" lives for the whole loop)
    for (const Sequence< NodeId > order = (_dag_.sizeNodes() == _node2op_.size())
                                            ? _dag_.topologicalOrder()
                                            : _fullDAG_().topologicalOrder();
         const auto node: order) {
      // get the operations corresponding to node
      const ScheduleOperator* this_op = _node2op_.second(node);
      const ScheduleOperator* from_op = from._node2op_.second(node);

      // check if they perform the same operations
      if (!this_op->isSameOperator(*from_op)) return false;

      // check that the content of their parameters are the same
      const Sequence< const IScheduleMultiDim* > this_args = this_op->args();
      const Sequence< const IScheduleMultiDim* > from_args = from_op->args();
      if (this_args.size() != from_args.size()) return false;

      for (Idx i = 0, end = this_args.size(); i < end; ++i) {
        // location = (producing operator or nullptr for a source, result index)
        const auto& [first, second] = _multidim_location_[this_args[i]];
        const auto& [from_first, from_second] = from._multidim_location_[from_args[i]];

        if (first != nullptr) {
          // here, this's and from's locations are the same if they correspond
          // to the same operation, with the same argument's index
          if ((from_first == nullptr) || (second != from_second)
              || (from_first != this_op2from[first]))
            return false;
        } else {
          // here, this_args[i] and from_args[i] should both be source multidims
          if (from_first != nullptr) return false;

          // here, we know that they are truly source multidims. If we have
          // already examined these multidims, we have already put the mapping
          // between this's and from's multidim into Bijection this_multidim2from
          bool found = false;
          if (this_multidim2from.existsFirst(this_args[i])) {
            if (this_multidim2from.second(this_args[i]) != from_args[i]) return false;
            else found = true;
          }

          if (!found) {
            // here, this is the first time we encounter both multidims. Hence,
            // we should check that their content are equal and store the two
            // ScheduleMultiDim into Structure this_multidim2from
            if (!this_args[i]->hasSameVariables(*from_args[i])
                || !this_args[i]->hasSameContent(*from_args[i]))
              return false;

            this_multidim2from.insert(this_args[i], from_args[i]);
          }
        }
      }

      // here, both operations have been identified as identical.
      // we should therefore store them into mapping table this_op2from
      this_op2from.insert(this_op, from_op);
    }

    // here, source multidims and operations, including their parameters, are
    // identical. Hence, both schedules should be considered equal
    return true;
  }
400
  /// inserts a copy of a source ScheduleMultiDim into the schedule
  /// @return a pointer to the clone stored (and owned) by the schedule
  /// @throws an exception if a multidim with the same id already exists or if
  /// the multidim is abstract (see the NOTE(review) comments below)
  const IScheduleMultiDim* Schedule::insertScheduleMultiDim(const IScheduleMultiDim& multidim) {
    // check that the ScheduleMultiDim neither already belongs to the schedule
    // nor contains an abstract table: since it is a source multidim, it will
    // never be computed by the schedule. Hence if it is abstract, it will not
    // be possible to execute the schedule
    if (_multidim2id_.existsSecond(multidim.id())) {
      // NOTE(review): the GUM_ERROR(<exception type>, opening of this macro
      // call appears lost in extraction — restore from the upstream source
      "A ScheduleMultiDim with Id " << multidim.id() << " already exists in the schedule")
    }
    if (multidim.isAbstract()) {
      // NOTE(review): same here — the GUM_ERROR( opening line is missing
      "It is impossible to insert an abstract ScheduleMultiDim " << "into a Schedule")
    }

    // now, everything is ok, so we should insert a copy of the ScheduleMultiDim
    // into the schedule
    const IScheduleMultiDim* new_multidim = multidim.clone();
    _multidim2nodes_.insert(new_multidim, NodeSet());
    // a (nullptr, 0) location marks a source multidim (not produced by any op)
    _multidim_location_.insert(new_multidim, std::pair< ScheduleOperator*, Idx >(nullptr, Idx(0)));
    _multidim2id_.insert(new_multidim, new_multidim->id());

    // indicate that the schedule has been modified
    ++_version_number_;

    return new_multidim;
  }
428
  /// inserts a source ScheduleMultiDim into the schedule WITHOUT copying it:
  /// the schedule stores the caller's pointer and will never deallocate it
  /// (the multidim is recorded in _emplaced_multidims_)
  /// @throws an exception if a multidim with the same id already exists or if
  /// the multidim is abstract (see the NOTE(review) comments below)
  void Schedule::emplaceScheduleMultiDim(const IScheduleMultiDim& multidim) {
    // check that the ScheduleMultiDim neither already belongs to the schedule
    // nor contains an abstract table: since it is a source multidim, it will
    // never be computed by the schedule. Hence, if it is abstract, it will not
    // be possible to execute the schedule
    if (_multidim2id_.existsSecond(multidim.id())) {
      // NOTE(review): the GUM_ERROR(<exception type>, opening of this macro
      // call appears lost in extraction — restore from the upstream source
      "A ScheduleMultiDim with Id " << multidim.id() << " already exists in the schedule")
    }
    if (multidim.isAbstract()) {
      // NOTE(review): same here — the GUM_ERROR( opening line is missing
      "It is impossible to insert an abstract ScheduleMultiDim " << "into a Schedule")
    }

    // now, everything is ok, so we should insert the ScheduleMultiDim
    // into the schedule
    _multidim2nodes_.insert(&multidim, NodeSet());
    // a (nullptr, 0) location marks a source multidim (not produced by any op)
    _multidim_location_.insert(&multidim, std::pair< ScheduleOperator*, Idx >(nullptr, Idx(0)));
    _multidim2id_.insert(&multidim, multidim.id());
    _emplaced_multidims_.insert(&multidim);

    // indicate that the schedule has been modified
    ++_version_number_;
  }
454
456 std::string Schedule::_paramString_(Idx i) {
457 if (i == 0) return "1st";
458 else if (i == 1) return "2nd";
459 else if (i == 2) return "3rd";
460
461 std::stringstream str;
462 str << (i + 1) << "th";
463 return str.str();
464 }
465
  /// inserts (a clone of) an operation into the schedule, wiring all the
  /// precedence constraints into _dag_
  /// @param op the operation to clone; its arguments must already belong to
  /// the schedule (matched by id)
  /// @param are_results_persistent whether the clone's results survive the
  /// schedule (forwarded to makeResultsPersistent)
  /// @return a reference to the clone owned by the schedule
  /// @throws exceptions on unknown arguments, double deletions, or deleted
  /// arguments still needed by pending operations (the GUM_ERROR( opening
  /// lines of the four macro calls below appear lost in extraction —
  /// NOTE(review): restore them from the upstream source)
  const ScheduleOperator& Schedule::insertOperation(const ScheduleOperator& op,
                                                    const bool are_results_persistent) {
    // check that the parameters of the operation already belong to the schedule.
    // to do so, it is sufficient to check that their ids belong to the schedule
    const Sequence< const IScheduleMultiDim* >& op_args = op.args();
    for (Idx i = Idx(0), end = op_args.size(); i < end; ++i) {
      if (!_multidim2id_.existsSecond(op_args[i]->id())) {
        // NOTE(review): missing GUM_ERROR( opening line
        "Schedule::insertOperation: the "
            << _paramString_(i + 1) << " (id: " << op_args[i]->id()
            << ") operation's argument does not already belong to"
            << " the schedule")
      }
    }

    // if another operation deletes some parameters used by the new operation, we
    // should check that this operation has not been executed yet. In this case,
    // we will enforce that the new operation is performed before the deletion.
    // In addition, we should also check that the new operation does not delete
    // its parameters (else those would be deleted twice)
    for (Idx i = Idx(0), end = op_args.size(); i < end; ++i) {
      const IScheduleMultiDim* arg = op_args[i];
      if (_deleted_multidim2node_.existsFirst(arg)
          && (_node2op_.second(_deleted_multidim2node_.second(arg))->isExecuted()
              || op.implyDeletion())) {
        // NOTE(review): missing GUM_ERROR( opening line
        "Schedule::insertOperation: The operation deletes its "
            << _paramString_(i + 1) << " argument, already deleted by another operation.")
      }
    }

    // finally, if the new operation deletes its parameters and has already
    // been executed, we should check that none of its parameters are used
    // by any other operation not performed yet
    if (op.implyDeletion() && op.isExecuted()) {
      for (Idx i = Idx(0), end = op_args.size(); i < end; ++i) {
        const auto arg = _multidim2id_.first(op_args[i]->id());
        for (const auto using_node: _multidim2nodes_[arg]) {
          if (!_node2op_.second(using_node)->isExecuted()) {
            // NOTE(review): missing GUM_ERROR( opening line
            "Schedule::insertOperation: the operation has"
                << " deleted its " << _paramString_(i + 1)
                << " argument, which is used by another operation"
                << " not executed yet.")
          }
        }
      }
    }

    // here, we know that we can safely add the new operation into the schedule

    // create a copy of the operation. Remember that the parameters of
    // ScheduleOperations are pointers to ScheduleMultiDim. Therefore, we need
    // to map the pointers of op_args to pointers of the corresponding
    // ScheduleMultiDim in our new operation
    ScheduleOperator* new_op = op.clone();
    Sequence< const IScheduleMultiDim* > new_args(op_args.size());
    for (Idx i = Idx(0), end = op_args.size(); i < end; ++i) {
      try {
        new_args << _multidim2id_.first(op_args[i]->id());
      } catch (NotFound const&) {
        // deallocate everything we have allocated
        delete new_op;

        // NOTE(review): missing GUM_ERROR( opening line
        "the " << _paramString_(i + 1) << " argument of the operation is not known by"
            << " the schedule")
      }
    }
    new_op->updateArgs(new_args);
    new_op->makeResultsPersistent(are_results_persistent);

    // everything is ok, so we should add the operation to the data structures
    const NodeId new_node = ++_newId_;
    _dag_.addNodeWithId(new_node);
    _node2op_.insert(new_node, new_op);

    // keep track that the new_node uses its arguments
    for (const auto new_arg: new_args) {
      _multidim2nodes_[new_arg].insert(new_node);
    }

    // if the operation deletes its arguments, keep track of this information.
    if (new_op->implyDeletion()) {
      for (const auto new_arg: new_args) {
        _deleted_multidim2node_.insert(new_arg, new_node);
      }
    }

    // keep track of the locations and ids of new_op's results
    const Sequence< const IScheduleMultiDim* > new_results = new_op->results();
    for (Idx i = Idx(0), end = new_results.size(); i < end; ++i) {
      _multidim_location_.insert(new_results[i], std::pair< ScheduleOperator*, Idx >(new_op, i));
      _multidim2id_.insert(new_results[i], new_results[i]->id());
      _multidim2nodes_.insert(new_results[i], NodeSet());
    }


    // now we should try to update Graph _dag_.
    // if the new operation has already been executed, we should remove it from
    // _dag_ and no other update is needed
    if (new_op->isExecuted()) {
      _dag_.eraseNode(new_node);
      return *new_op;
    }


    // Here, the new operation has not been executed yet, so we should update the
    // graph and the set of available operations (operations that delete their
    // arguments could have been available but can now become unavailable due to
    // the fact that the new operation uses these arguments)
    for (const auto arg: new_args) {
      // If there exists another operation that deletes Parameter arg, then we
      // should add an arc from new_node to this operation (as new_op should be
      // executed before the argument is deleted).
      if (_deleted_multidim2node_.existsFirst(arg)) {
        const NodeId other_node = _deleted_multidim2node_.second(arg);
        if (other_node != new_node) { _dag_.addArc(new_node, other_node); }
      }

      // if Parameter arg has been created by another operation, then new_node
      // should be a child of this node
      if (const auto arg_location = _multidim_location_[arg].first; arg_location != nullptr) {
        const NodeId parent_node = _node2op_.first(arg_location);
        if ((parent_node != new_node) && !arg_location->isExecuted()) {
          _dag_.addArc(parent_node, new_node);
        }
      }

      // if new_op deletes its parameters, then we should add arcs from all
      // the operations that use Parameter arg to new_node: actually, these nodes
      // use the parameter but do not delete it, hence they should be executed
      // before new_op
      if (new_op->implyDeletion()) {
        for (const auto other_node: _multidim2nodes_[arg]) {
          if ((other_node != new_node) && !_dag_.existsArc(other_node, new_node)
              && !_node2op_.second(other_node)->isExecuted()) {
            _dag_.addArc(other_node, new_node);
          }
        }
      }
    }

    // indicate that the schedule has been modified
    ++_version_number_;

    return *new_op;
  }
615
617 NodeSet Schedule::availableOperations() const {
618 NodeSet available_nodes;
619 for (const auto node: _dag_) {
620 if (!this->operation(node).isExecuted()) {
621 bool all_parents_executed = true;
622 for (const auto par: _dag_.parents(node)) {
623 if (!this->operation(par).isExecuted()) {
624 all_parents_executed = false;
625 break;
626 }
627 }
628 if (all_parents_executed) available_nodes.insert(node);
629 }
630 }
631 return available_nodes;
632 }
633
  /// updates the schedule's DAG after the operation of id exec_node has been
  /// executed: removes exec_node from the DAG and reports which children
  /// thereby become available
  /// @param exec_node the id of the operation that was just executed
  /// @param new_available_nodes output parameter (cleared first) filled with
  /// the children of exec_node that have no remaining unexecuted parent
  /// @param check when true, validates that exec_node exists, was available
  /// (no parents), and has indeed been executed; throws otherwise (the
  /// GUM_ERROR( opening lines of the three macro calls below appear lost in
  /// extraction — NOTE(review): restore them from the upstream source)
  void Schedule::updateAfterExecution(const NodeId exec_node,
                                      std::vector< NodeId >& new_available_nodes,
                                      const bool check) {
    if (check) {
      // check that the node exists
      if (!_dag_.existsNode(exec_node)) {
        // NOTE(review): missing GUM_ERROR( opening line
        "the schedule cannot be updated because Operation of Id "
            << exec_node << " that has been executed does not belong to its DAG.")
      }

      // before performing the update, check that the operation was available
      // to do so, it is sufficient to check it had no parent
      if (!_dag_.parents(exec_node).empty()) {
        // NOTE(review): missing GUM_ERROR( opening line
        "the schedule cannot be updated because Operation of Id "
            << exec_node << " is not available yet and should not have been executed.")
      }

      // check that the operation has really been executed
      if (!_node2op_.second(exec_node)->isExecuted()) {
        // NOTE(review): missing GUM_ERROR( opening line
        "the schedule cannot be updated because Operation of Id "
            << exec_node << " has not been executed yet.")
      }
    }

    // If the children of exec_node have just one remaining parent (i.e., this
    // is exec_node), then these children are new available operations
    new_available_nodes.clear();
    for (const auto child_node: _dag_.children(exec_node)) {
      if (_dag_.parents(child_node).size() == 1) {   // only exec_node is a parent
        new_available_nodes.push_back(child_node);
      }
    }

    // remove node_exec
    _dag_.eraseNode(exec_node);

    // the schedule changed: give it a fresh, globally-unique version number
    _version_number_ = _newVersionNumber_();
  }
676
677} // namespace gum
678
679#endif /* DOXYGEN_SHOULD_SKIP_THIS */
Exception : The Schedule MultiDim Table is abstract.
Base class for dag.
Definition DAG.h:121
Exception : There exists another identical Schedule MultiDim Table.
Exception : the element we looked for cannot be found.
Exception : operation not allowed.
Class containing a schedule of operations to perform on multidims.
Definition schedule.h:80
static std::atomic< Idx > _overall_version_number_
Definition schedule.h:446
void _copy_(const Schedule &from)
a function to copy the content of a schedule into another one
void _destroy_()
a function to delete from memory the content of a schedule
Schedule(const Size nb_ops=256)
default constructor (constructs an empty schedule)
Bijection< const IScheduleMultiDim *, NodeId > _deleted_multidim2node_
A bijection mapping ScheduleMultiDims to be deleted with the operations that will delete them.
Definition schedule.h:421
void clear()
empty the schedule, i.e., remove its content
const DAG & dag() const
returns a DAG indicating in which order the operations can be performed
DAG _fullDAG_() const
returns the graph that would obtain when no operation is performed
Bijection< const IScheduleMultiDim *, Idx > _multidim2id_
a bijection between pointers to IScheduleMultiDim and their Ids
Definition schedule.h:402
Idx _version_number_
a number that identifies the current version of the schedule
Definition schedule.h:426
HashTable< const IScheduleMultiDim *, NodeSet > _multidim2nodes_
maps the multidims to the set of operations that use them
Definition schedule.h:408
static Idx _newVersionNumber_()
returns a new distinct version for each schedule
Set< const IScheduleMultiDim * > _emplaced_multidims_
indicates which ScheduleMultiDims were emplaced
Definition schedule.h:405
Bijection< NodeId, ScheduleOperator * > _node2op_
a mapping between the ids of the operations and their pointer
Definition schedule.h:384
NodeId _newId_
the highest node id in the graph
Definition schedule.h:381
DAG _dag_
the DAG of the operations to perform
Definition schedule.h:378
HashTable< const IScheduleMultiDim *, std::pair< ScheduleOperator *, Idx > > _multidim_location_
a structure to indicate precisely where a ScheduleMultiDim comes from
Definition schedule.h:399
Exception : The Schedule Operation is not available yet.
Exception : The Schedule Operation has not been executed yet.
Exception : The Schedule MultiDim Table is unknown.
Exception : The Schedule Operation is unknown.
#define GUM_ERROR(type, msg)
Definition exceptions.h:72
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition types.h:74
Size Idx
Type for indexes.
Definition types.h:79
Size NodeId
Type for node ids.
Set< NodeId > NodeSet
Some typedefs and defines for shortcuts ...
gum is the global namespace for all aGrUM entities
Definition agrum.h:46
STL namespace.
Class containing a schedule of operations to perform on multidims.