aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
lazyPropagation_tpl.h
/****************************************************************************
 *  This file is part of the aGrUM/pyAgrum library.
 *
 *  Copyright (c) 2005-2025 by
 *      - Pierre-Henri WUILLEMIN(_at_LIP6)
 *      - Christophe GONZALES(_at_AMU)
 *
 *  The aGrUM/pyAgrum library is free software; you can redistribute it
 *  and/or modify it under the terms of either:
 *
 *      - the GNU Lesser General Public License as published by
 *        the Free Software Foundation, either version 3 of the License,
 *        or (at your option) any later version,
 *      - the MIT license (MIT),
 *      - or both in dual license, as here.
 *
 *  (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html)
 *
 *  This aGrUM/pyAgrum library is distributed in the hope that it will be
 *  useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
 *  INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 *  THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 *  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 *  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 *  OTHER DEALINGS IN THE SOFTWARE.
 *
 *  See LICENCES for more details.
 *
 *  SPDX-FileCopyrightText: Copyright 2005-2025
 *      - Pierre-Henri WUILLEMIN(_at_LIP6)
 *      - Christophe GONZALES(_at_AMU)
 *  SPDX-License-Identifier: LGPL-3.0-or-later OR MIT
 *
 *  Contact  : info_at_agrum_dot_org
 *  homepage : http://agrum.gitlab.io
 *  gitlab   : https://gitlab.com/agrumery/agrum
 ****************************************************************************/
#pragma once

#ifndef DOXYGEN_SHOULD_SKIP_THIS
#  include <algorithm>
#  include <limits>

#  include <agrum/BN/inference/lazyPropagation.h>

namespace gum {

  // default constructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::LazyPropagation(const IBayesNet< GUM_SCALAR >* BN,
                                                        RelevantTensorsFinderType relevant_type,
                                                        FindBarrenNodesType       barren_type,
                                                        bool use_binary_join_tree) :
      JointTargetedInference< GUM_SCALAR >(BN), EvidenceInference< GUM_SCALAR >(BN),
      _use_binary_join_tree_(use_binary_join_tree) {
    // set the relevant tensors and the barren nodes finding algorithms
    _findRelevantTensors_ = &LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
    setRelevantTensorsFinderType(relevant_type);
    setFindBarrenNodesType(barren_type);

    // create a default triangulation (the user can change it afterwards)
    _triangulation_ = new DefaultTriangulation;

    // for debugging purposes
    GUM_CONSTRUCTOR(LazyPropagation);
  }

  // destructor
  template < typename GUM_SCALAR >
  INLINE LazyPropagation< GUM_SCALAR >::~LazyPropagation() {
    // remove all the tensors created during the last message passing
    for (const auto& pots: _arc_to_created_tensors_)
      for (const auto pot: pots.second)
        delete pot;

    // remove all the tensors stored into the cliques
    for (const auto& pots: _clique_tensors_)
      for (const auto pot: pots.second)
        delete pot;

    // remove all the posteriors computed
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;

    // remove the junction tree and the triangulation algorithm
    if (_JT_ != nullptr) delete _JT_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;
    delete _triangulation_;

    GUM_DESTRUCTOR(LazyPropagation);
  }
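
  // Typical usage, as a minimal sketch (the BN below is a hypothetical
  // three-node chain; any IBayesNet works):
  //
  //   auto bn = gum::BayesNet< double >::fastPrototype("a->b->c");
  //   gum::LazyPropagation< double > ie(&bn);
  //   ie.addEvidence("a", 0);          // hard evidence: a = 0
  //   ie.makeInference();
  //   std::cout << ie.posterior("c");  // posterior of "c" given the evidence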

  /// sets a new triangulation algorithm
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setTriangulation(const Triangulation& new_triangulation) {
    delete _triangulation_;
    _triangulation_    = new_triangulation.newFactory();
    _is_new_jt_needed_ = true;
    this->setOutdatedStructureState_();
  }
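
  // Example (a sketch): installing a custom triangulation. The inference
  // copies it via newFactory(), so the argument may be a local object:
  //
  //   gum::DefaultTriangulation triangulation;   // or any other gum::Triangulation
  //   ie.setTriangulation(triangulation);        // next inference rebuilds the join tree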

  /// returns the join tree used for the inference
  template < typename GUM_SCALAR >
  INLINE const JoinTree* LazyPropagation< GUM_SCALAR >::joinTree() {
    if (_is_new_jt_needed_) _createNewJT_();

    return _JT_;
  }

  /// returns the junction tree computed by the triangulation
  template < typename GUM_SCALAR >
  INLINE const JunctionTree* LazyPropagation< GUM_SCALAR >::junctionTree() {
    if (_is_new_jt_needed_) _createNewJT_();

    return _junctionTree_;
  }
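
  // Both accessors (re)build the trees lazily when needed, e.g.:
  //
  //   const gum::JoinTree*     jt  = ie.joinTree();       // tree used for message passing
  //   const gum::JunctionTree* jct = ie.junctionTree();   // junction tree of the triangulation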

  /// sets how we determine the relevant tensors to combine
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setRelevantTensorsFinderType(RelevantTensorsFinderType type) {
    if (type != _find_relevant_tensor_type_) {
      switch (type) {
        case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
          _findRelevantTensors_
              = &LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
          break;

        case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
          _findRelevantTensors_
              = &LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_;
          break;

        case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
          _findRelevantTensors_
              = &LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_;
          break;

        case RelevantTensorsFinderType::FIND_ALL :
          _findRelevantTensors_ = &LazyPropagation< GUM_SCALAR >::_findRelevantTensorsGetAll_;
          break;

        default :
          GUM_ERROR(InvalidArgument,
                    "setRelevantTensorsFinderType for type " << (unsigned int)type
                                                             << " is not implemented yet");
      }

      _find_relevant_tensor_type_ = type;

      // indicate that all messages need to be reconstructed to take into
      // account the change in d-separation analysis
      _invalidateAllMessages_();
    }
  }
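
  // Example (a sketch): selecting the Koller & Friedman (2009) d-separation
  // analysis instead of the default Bayes-ball on tensors:
  //
  //   ie.setRelevantTensorsFinderType(
  //       gum::RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009);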

  /// sets the operator for performing the projections
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_setProjectionFunction_(
      Tensor< GUM_SCALAR > (*proj)(const Tensor< GUM_SCALAR >&, const gum::VariableSet&)) {
    _projection_op_ = proj;

    // indicate that all messages need to be reconstructed to take into
    // account the change of the projection operator
    _invalidateAllMessages_();
  }

  /// sets the operator for performing the combinations
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::_setCombinationFunction_(
      Tensor< GUM_SCALAR > (*comb)(const Tensor< GUM_SCALAR >&, const Tensor< GUM_SCALAR >&)) {
    _combination_op_ = comb;

    // indicate that all messages need to be reconstructed to take into
    // account the change of the combination operator
    _invalidateAllMessages_();
  }
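
  // The two operators determine the semiring used by the propagation
  // (sum-product by default). A sketch of an alternative projection matching
  // the expected signature; Tensor::margMaxOut is an assumption here, carried
  // over from the Potential API that Tensor replaces:
  //
  //   template < typename GUM_SCALAR >
  //   gum::Tensor< GUM_SCALAR > maxProject(const gum::Tensor< GUM_SCALAR >& t,
  //                                        const gum::VariableSet&          del_vars) {
  //     return t.margMaxOut(del_vars);   // max- instead of sum-projection
  //   }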

  /// invalidates all the messages sent over the join tree
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_invalidateAllMessages_() {
    // remove all the messages computed
    for (auto& potset: _separator_tensors_)
      potset.second.clear();
    for (auto& mess_computed: _messages_computed_)
      mess_computed.second = false;

    // remove all the created tensors kept on the arcs
    for (const auto& potset: _arc_to_created_tensors_)
      for (const auto pot: potset.second)
        delete pot;
    _arc_to_created_tensors_.clear();

    // remove all the posteriors
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    _target_posteriors_.clear();
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;
    _joint_target_posteriors_.clear();

    // indicate that new messages need to be computed
    if (this->isInferenceReady() || this->isInferenceDone()) this->setOutdatedTensorsState_();
  }

  /// sets how we determine barren nodes
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::setFindBarrenNodesType(FindBarrenNodesType type) {
    if (type != _barren_nodes_type_) {
      // WARNING: if a new type is added here, method _createJT_ should
      // certainly be updated as well, in particular its step 2.
      switch (type) {
        case FindBarrenNodesType::FIND_BARREN_NODES :
        case FindBarrenNodesType::FIND_NO_BARREN_NODES : break;

        default :
          GUM_ERROR(InvalidArgument,
                    "setFindBarrenNodesType for type " << (unsigned int)type
                                                       << " is not implemented yet");
      }

      _barren_nodes_type_ = type;

      // potentially, we may need to reconstruct a junction tree
      this->setOutdatedStructureState_();
    }
  }
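
  // Example (a sketch): disabling the barren nodes optimization, e.g. to
  // compare running times with and without the pruning:
  //
  //   ie.setFindBarrenNodesType(gum::FindBarrenNodesType::FIND_NO_BARREN_NODES);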

  /// fired when a new evidence is inserted
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onEvidenceAdded_(const NodeId id,
                                                              bool         isHardEvidence) {
    // if we have a new hard evidence, this modifies the undigraph over which
    // the join tree is created. This is also the case if id is not a node
    // of the undigraph
    if (isHardEvidence || !_graph_.exists(id)) _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
      } catch (DuplicateElement const&) {
        // here, the evidence change already existed. This necessarily means
        // that the current saved change is an EVIDENCE_ERASED. So if we
        // erased the evidence and added some again, this corresponds to an
        // EVIDENCE_MODIFIED
        _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
      }
    }
  }

  /// fired when an evidence is removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onEvidenceErased_(const NodeId id,
                                                               bool         isHardEvidence) {
    // if we delete a hard evidence, this modifies the undigraph over which
    // the join tree is created.
    if (isHardEvidence) _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
      } catch (DuplicateElement const&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
        // been added and is now erased, this is similar to not having created
        // it. If the evidence was only modified, it already existed in the
        // last inference and we should now indicate that it has been removed.
        if (_evidence_changes_[id] == EvidenceChangeType::EVIDENCE_ADDED)
          _evidence_changes_.erase(id);
        else _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_ERASED;
      }
    }
  }

  /// fired when all the evidence are removed
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::onAllEvidenceErased_(bool has_hard_evidence) {
    if (has_hard_evidence || !this->hardEvidenceNodes().empty()) _is_new_jt_needed_ = true;
    else {
      for (const auto node: this->softEvidenceNodes()) {
        try {
          _evidence_changes_.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
        } catch (DuplicateElement const&) {
          // here, the evidence change already existed and it is necessarily an
          // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
          // been added and is now erased, this is similar to not having created
          // it. If the evidence was only modified, it already existed in the
          // last inference and we should now indicate that it has been removed.
          if (_evidence_changes_[node] == EvidenceChangeType::EVIDENCE_ADDED)
            _evidence_changes_.erase(node);
          else _evidence_changes_[node] = EvidenceChangeType::EVIDENCE_ERASED;
        }
      }
    }
  }

  /// fired when an evidence is changed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onEvidenceChanged_(const NodeId id,
                                                                bool hasChangedSoftHard) {
    if (hasChangedSoftHard) _is_new_jt_needed_ = true;
    else {
      try {
        _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
      } catch (DuplicateElement const&) {
        // here, the evidence change already existed and it is necessarily an
        // EVIDENCE_ADDED. So we should keep this state to indicate that this
        // evidence is new w.r.t. the last inference
      }
    }
  }
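
  // These handlers implement the incremental behavior: changing only a soft
  // evidence invalidates the affected messages, while adding, removing or
  // hardening an evidence forces a new join tree. For instance (a sketch):
  //
  //   ie.addEvidence("a", std::vector< double >{0.2, 0.8});   // soft evidence
  //   ie.makeInference();
  //   ie.chgEvidence("a", std::vector< double >{0.5, 0.5});   // messages recomputed,
  //   ie.makeInference();                                     // join tree kept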

  /// fired after the model has changed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onModelChanged_(const GraphicalModel* bn) {}

  /// fired after a new single target is inserted
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onMarginalTargetAdded_(const NodeId id) {
    // if the graph does not contain the node, either this is due to the fact that
    // the node has received a hard evidence or because it was d-separated from the
    // target nodes during the last inference. In the latter case, we should change
    // the graph, hence we should recompute the JT
    if (!_graph_.exists(id) && !_hard_ev_nodes_.contains(id)) { _is_new_jt_needed_ = true; }
  }

  /// fired before a single target is removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onMarginalTargetErased_(const NodeId id) {}

  /// fired after a new joint target is inserted
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::onJointTargetAdded_(const NodeSet& set) {
    // if there is no current joint tree, obviously, we need one.
    if (_JT_ == nullptr) {
      _is_new_jt_needed_ = true;
      return;
    }

    // here, we will remove from set the nodes that received hard evidence and try
    // to find a clique in the current _JT_ that can contain the resulting set. If
    // we cannot find one, we must recompute the _JT_
    NodeId                       first_eliminated_node = std::numeric_limits< NodeId >::max();
    int                          elim_number           = std::numeric_limits< int >::max();
    const std::vector< NodeId >& JT_elim_order         = _triangulation_->eliminationOrder();
    NodeProperty< int >          elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    NodeSet unobserved_set(set.size());
    for (const auto node: set) {
      if (!_graph_.exists(node)) {
        if (!_hard_ev_nodes_.contains(node)) {
          _is_new_jt_needed_ = true;
          return;
        }
      } else {
        unobserved_set.insert(node);
        if (elim_order[node] < elim_number) {
          elim_number           = elim_order[node];
          first_eliminated_node = node;
        }
      }
    }

    if (!unobserved_set.empty()) {
      // here, first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // should contain all the nodes in unobserved_set
      const auto  clique_id = _node_to_clique_[first_eliminated_node];
      const auto& clique    = _JT_->clique(clique_id);
      for (const auto node: unobserved_set) {
        if (!clique.contains(node)) {
          _is_new_jt_needed_ = true;
          return;
        }
      }
    }
  }
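
  // Joint targets are declared before inference and answered from a clique
  // containing all their (unobserved) nodes, e.g. (a sketch):
  //
  //   gum::NodeSet target{bn.idFromName("a"), bn.idFromName("b")};
  //   ie.addJointTarget(target);
  //   ie.makeInference();
  //   std::cout << ie.jointPosterior(target);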

  /// fired before a joint target is removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onJointTargetErased_(const NodeSet& set) {}

  /// fired after all the nodes of the BN are added as single targets
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onAllMarginalTargetsAdded_() {
    for (const auto node: this->BN().dag()) {
      // if the graph does not contain the node, either this is due to the fact
      // that the node has received a hard evidence or because it was d-separated
      // from the target nodes during the last inference. In the latter case, we
      // should change the graph, hence we should recompute the JT
      if (!_graph_.exists(node) && !_hard_ev_nodes_.contains(node)) {
        _is_new_jt_needed_ = true;
        return;
      }
    }
  }

  /// fired before all the single targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onAllMarginalTargetsErased_() {}

  /// fired before all the joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onAllJointTargetsErased_() {}

  /// fired before all the single and joint targets are removed
  template < typename GUM_SCALAR >
  INLINE void LazyPropagation< GUM_SCALAR >::onAllTargetsErased_() {}

  // check whether a new junction tree is really needed for the next inference
  template < typename GUM_SCALAR >
  bool LazyPropagation< GUM_SCALAR >::_isNewJTNeeded_() const {
    // if we do not have a JT or if _is_new_jt_needed_ is set to true, then
    // we know that we need to create a new join tree
    if ((_JT_ == nullptr) || _is_new_jt_needed_) return true;

    // if some targets do not belong to the join tree and, consequently, to the
    // undirected graph that was used to construct the join tree, then we need
    // to create a new JT. This situation may occur if we constructed the
    // join tree after pruning irrelevant/barren nodes from the BN.
    // However, note that the nodes that received hard evidence do not belong to
    // the graph and, therefore, should not be taken into account
    const auto& hard_ev_nodes = this->hardEvidenceNodes();
    for (const auto node: this->targets()) {
      if (!_graph_.exists(node) && !hard_ev_nodes.exists(node)) return true;
    }

    // now, do the same for the joint targets
    const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
    NodeProperty< int >          elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    NodeSet unobserved_set;

    for (const auto& joint_target: this->jointTargets()) {
      // here, we need to check that at least one clique contains all the
      // nodes of the joint target.
      NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
      int    elim_number           = std::numeric_limits< int >::max();
      unobserved_set.clear();
      for (const auto node: joint_target) {
        if (!_graph_.exists(node)) {
          if (!hard_ev_nodes.exists(node)) return true;
        } else {
          unobserved_set.insert(node);
          if (elim_order[node] < elim_number) {
            elim_number           = elim_order[node];
            first_eliminated_node = node;
          }
        }
      }
      if (!unobserved_set.empty()) {
        // here, first_eliminated_node contains the first var (node or one of its
        // parents) eliminated => the clique created during its elimination
        // should contain all the nodes in unobserved_set
        const auto  clique_id = _node_to_clique_[first_eliminated_node];
        const auto& clique    = _JT_->clique(clique_id);
        for (const auto node: unobserved_set) {
          if (!clique.contains(node)) return true;
        }
      }
    }

    // if some new evidence has been added on nodes that do not belong
    // to _graph_, then we potentially have to reconstruct the join tree
    for (const auto& change: _evidence_changes_) {
      if ((change.second == EvidenceChangeType::EVIDENCE_ADDED) && !_graph_.exists(change.first))
        return true;
    }

    // here, the current JT is exactly what we need for the next inference
    return false;
  }

  /// create a new junction tree as well as its related data structures
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_createNewJT_() {
    // to create the JT, we first create the moral graph of the BN in the
    // following way in order to take into account the barren nodes and the
    // nodes that received evidence:
    // 1/ we create an undirected graph containing only the nodes and no edge
    // 2/ if we take into account barren nodes, remove them from the graph
    // 3/ if we take d-separation into account, remove the d-separated nodes
    // 4/ add edges so that each node and its parents in the BN form a clique
    // 5/ add edges so that joint targets form a clique of the moral graph
    // 6/ remove the nodes that received hard evidence (by step 4/, their
    //    parents are linked by edges, which is necessary for inference)
    //
    // At the end of step 6/, we have our moral graph and we can triangulate it
    // to get the new junction tree
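    //
    // As a small illustration (hypothetical): for the v-structure a -> c <- b
    // with hard evidence on c and targets {a, b}, step 4/ links a-c, b-c and
    // a-b (moralization), and step 6/ then drops c; the a-b edge created from
    // c's parents is what preserves the dependence that observing c induces
    // between a and b.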

    // 1/ create an undirected graph containing only the nodes and no edge
    const auto& bn = this->BN();
    _graph_.clear();
    for (const auto node: bn.dag())
      _graph_.addNodeWithId(node);

    // identify the target nodes
    NodeSet target_nodes = this->targets();
    for (const auto& nodeset: this->jointTargets()) {
      target_nodes += nodeset;
    }

    // 2/ if we wish to exploit barren nodes, we shall remove them from the
    // BN. To do so: we identify all the nodes that are not targets and have
    // received no evidence and such that their descendants are neither
    // targets nor evidence nodes. Such nodes can be safely discarded from
    // the BN without altering the inference output
    if ((this->nbrTargets() != bn.dag().size())
        && (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES)) {
      // check that not all the nodes are targets, otherwise, there is no
      // barren node
      if (target_nodes.size() != bn.size()) {
        BarrenNodesFinder finder(&(bn.dag()));
        finder.setTargets(&target_nodes);

        NodeSet evidence_nodes(this->evidence().size());
        for (const auto& pair: this->evidence()) {
          evidence_nodes.insert(pair.first);
        }

        finder.setEvidence(&evidence_nodes);

        NodeSet barren_nodes = finder.barrenNodes();

        // remove the barren nodes from the moral graph
        for (const auto node: barren_nodes) {
          _graph_.eraseNode(node);
        }
      }
    }

    // 3/ if we wish to exploit d-separation, remove all the nodes that are
    // d-separated from our targets. Of course, if all the nodes are targets,
    // no need to perform a d-separation analysis
    if (this->nbrTargets() != bn.dag().size()) {
      NodeSet requisite_nodes;
      bool    dsep_analysis = false;
      switch (_find_relevant_tensor_type_) {
        case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
        case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES : {
          BayesBall::requisiteNodes(bn.dag(),
                                    target_nodes,
                                    this->hardEvidenceNodes(),
                                    this->softEvidenceNodes(),
                                    requisite_nodes);
          dsep_analysis = true;
        } break;

        case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 : {
          dSeparationAlgorithm dsep;
          dsep.requisiteNodes(bn.dag(),
                              target_nodes,
                              this->hardEvidenceNodes(),
                              this->softEvidenceNodes(),
                              requisite_nodes);
          dsep_analysis = true;
        } break;

        case RelevantTensorsFinderType::FIND_ALL : break;

        default : GUM_ERROR(FatalError, "not implemented yet")
      }

      // remove all the nodes that are not requisite
      if (dsep_analysis) {
        for (auto iter = _graph_.beginSafe(); iter != _graph_.endSafe(); ++iter) {
          if (!requisite_nodes.contains(*iter) && !this->hardEvidenceNodes().contains(*iter)) {
            _graph_.eraseNode(*iter);
          }
        }
      }
    }

    // 4/ add edges so that each node and its parents in the BN form a clique
    for (const auto node: _graph_) {
      const NodeSet& parents = bn.parents(node);
      for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
        // before adding an edge between node and its parent, check that the
        // parent belongs to the graph. Actually, when d-separated nodes are
        // removed, it may be the case that the parents of hard evidence nodes
        // are removed. But the latter still exist in the graph.
        if (_graph_.existsNode(*iter1)) {
          _graph_.addEdge(*iter1, node);

          auto iter2 = iter1;
          for (++iter2; iter2 != parents.cend(); ++iter2) {
            // before adding an edge, check that both extremities belong to
            // the graph. Actually, when d-separated nodes are removed, it may
            // be the case that the parents of hard evidence nodes are removed.
            // But the latter still exist in the graph.
            if (_graph_.existsNode(*iter2)) _graph_.addEdge(*iter1, *iter2);
          }
        }
      }
    }

    // 5/ if there exist some joint targets, we shall add new edges
    // into the moral graph in order to ensure that there exists a clique
    // containing each joint
    for (const auto& nodeset: this->jointTargets()) {
      for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
        auto iter2 = iter1;
        for (++iter2; iter2 != nodeset.cend(); ++iter2) {
          _graph_.addEdge(*iter1, *iter2);
        }
      }
    }

    // 6/ remove all the nodes that received hard evidence
    _hard_ev_nodes_ = this->hardEvidenceNodes();
    for (const auto node: _hard_ev_nodes_) {
      _graph_.eraseNode(node);
    }

    // now, we can compute the new junction tree. To speed-up computations
    // (essentially, those of a distribution phase), we construct from this
    // junction tree a binary join tree
    if (_JT_ != nullptr) delete _JT_;
    if (_junctionTree_ != nullptr) delete _junctionTree_;

    _triangulation_->setGraph(&_graph_, &(this->domainSizes()));
    const JunctionTree& triang_jt = _triangulation_->junctionTree();
    if (_use_binary_join_tree_) {
      BinaryJoinTreeConverterDefault bjt_converter;
      NodeSet                        emptyset;
      _JT_ = new CliqueGraph(bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
    } else {
      _JT_ = new CliqueGraph(triang_jt);
    }
    _junctionTree_ = new CliqueGraph(triang_jt);

    // indicate, for each node of the moral graph, a clique in _JT_ that can
    // contain its conditional probability table
    _node_to_clique_.clear();
    const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
    NodeProperty< int >          elim_order(Size(JT_elim_order.size()));
    for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
      elim_order.insert(JT_elim_order[i], (int)i);
    const DAG& dag = bn.dag();
    for (const auto node: _graph_) {
      // get the variables in the tensor of node (and its parents)
      NodeId first_eliminated_node = node;
      int    elim_number           = elim_order[first_eliminated_node];

      for (const auto parent: dag.parents(node)) {
        if (_graph_.existsNode(parent) && (elim_order[parent] < elim_number)) {
          elim_number           = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the tensor
      // assigned to the node in the BN
      _node_to_clique_.insert(node,
                              _triangulation_->createdJunctionTreeClique(first_eliminated_node));
    }

    // do the same for the nodes that received hard evidence. Here, we only store
    // the nodes for which at least one parent belongs to _graph_ (otherwise
    // their CPT is just a constant real number).
    for (const auto node: _hard_ev_nodes_) {
      NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
      int    elim_number           = std::numeric_limits< int >::max();

      for (const auto parent: dag.parents(node)) {
        if (_graph_.exists(parent) && (elim_order[parent] < elim_number)) {
          elim_number           = elim_order[parent];
          first_eliminated_node = parent;
        }
      }

      // first_eliminated_node contains the first var (node or one of its
      // parents) eliminated => the clique created during its elimination
      // contains node and all of its parents => it can contain the tensor
      // assigned to the node in the BN
      if (elim_number != std::numeric_limits< int >::max()) {
        _node_to_clique_.insert(node,
                                _triangulation_->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // indicate for each joint target a clique that contains it
    _joint_target_to_clique_.clear();
    for (const auto& set: this->jointTargets()) {
      NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
      int    elim_number           = std::numeric_limits< int >::max();

      // do not take into account the nodes that received hard evidence
      // (since they do not belong to the join tree)
      for (const auto node: set) {
        if (!_hard_ev_nodes_.contains(node)) {
          // the clique we are looking for is the one that was created when
          // the first element of nodeset was eliminated
          if (elim_order[node] < elim_number) {
            elim_number           = elim_order[node];
            first_eliminated_node = node;
          }
        }
      }

      if (elim_number != std::numeric_limits< int >::max()) {
        _joint_target_to_clique_.insert(
            set,
            _triangulation_->createdJunctionTreeClique(first_eliminated_node));
      }
    }

    // compute the roots of _JT_'s connected components
    _computeJoinTreeRoots_();

    // remove all the tensors stored into the cliques. Note that these include
    // the CPTs resulting from the projections of hard evidence as well as the
    // CPTs of the soft evidence
    for (const auto& potlist: _clique_tensors_)
      for (const auto pot: potlist.second)
        delete pot;
    _clique_tensors_.clear();

    // remove all the tensors created during the last inference
    for (const auto& potlist: _arc_to_created_tensors_)
      for (const auto pot: potlist.second)
        delete pot;
    _arc_to_created_tensors_.clear();

    // remove all the tensors created to take into account hard evidence
    // during the last inference (they have already been deleted from memory
    // by the clearing of _clique_tensors_).
    _node_to_hard_ev_projected_CPTs_.clear();

    // remove all the soft evidence.
    _node_to_soft_evidence_.clear();

    // create empty tensor lists into the cliques of the join tree as well
    // as empty lists of evidence
    _ScheduleMultiDimSet_ empty_set;
    for (const auto node: *_JT_) {
      _clique_tensors_.insert(node, empty_set);
    }

    // remove all the constants created due to projections of CPTs that were
    // defined over only hard evidence nodes
    _constants_.clear();

    // create empty lists of tensors for the messages and indicate that no
    // message has been computed yet
    _separator_tensors_.clear();
    _messages_computed_.clear();
    for (const auto& edge: _JT_->edges()) {
      const Arc arc1(edge.first(), edge.second());
      _separator_tensors_.insert(arc1, empty_set);
      _messages_computed_.insert(arc1, false);
      const Arc arc2(edge.second(), edge.first());
      _separator_tensors_.insert(arc2, empty_set);
      _messages_computed_.insert(arc2, false);
    }

    // remove all the posteriors computed so far
    for (const auto& pot: _target_posteriors_)
      delete pot.second;
    _target_posteriors_.clear();
    for (const auto& pot: _joint_target_posteriors_)
      delete pot.second;
    _joint_target_posteriors_.clear();

    // here, we determine whether we should use schedules during the inference.
    // the rule is: if the sum of the domain sizes of the cliques is greater
    // than a threshold, use schedules
    double overall_size = 0;
    for (const auto clique: *_JT_) {
      double clique_size = 1.0;
      for (const auto node: _JT_->clique(clique))
        clique_size *= this->domainSizes()[node];
      overall_size += clique_size;
    }
    _use_schedules_ = (overall_size > _schedule_threshold_);

    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the tensors that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the tensor
    if (_use_schedules_) {
      Schedule schedule;
      _initializeJTCliques_(schedule);
    } else {
      _initializeJTCliques_();
    }

    // we shall now add all the tensors of the soft evidence
    const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
    for (const auto node: this->softEvidenceNodes()) {
      if (_node_to_clique_.exists(node)) {
        auto ev_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node], false);
        _node_to_soft_evidence_.insert(node, ev_pot);
        _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
      }
    }

    // indicate that the data structures are up to date.
    _evidence_changes_.clear();
    _is_new_jt_needed_ = false;
  }

  /// put all the CPTs into the cliques when no schedule is used
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_initializeJTCliques_() {
    const auto& bn  = this->BN();
    const DAG&  dag = bn.dag();

    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the tensors that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the tensor
    const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence      = this->evidence();
    const NodeProperty< Idx >&                         hard_evidence = this->hardEvidence();

    for (const auto node: dag) {
      if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
        const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet     hard_nodes;
        const auto& variables            = cpt.variablesSequence();
        bool        graph_contains_nodes = false;
        for (const auto var: variables) {
          NodeId xnode = bn.nodeId(*var);
          if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
          else if (_graph_.exists(xnode)) graph_contains_nodes = true;
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          auto sched_cpt = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt, false);
          _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a tensor anymore but as a constant
          // TODO substitute constants by 0-dimensional tensors
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst(cpt);
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            _constants_.insert(node, cpt.get(inst));
          } else {
            // here, we have a CPT defined over some nodes that received hard
            // evidence and other nodes that did not receive it. If none of the
            // latter belong to the graph, then the CPT is useless for inference
            if (!graph_contains_nodes) continue;

            // prepare the projection with a combine and project instance
            gum::VariableSet hard_variables;
            _TensorSet_      marg_cpt_set(1 + hard_nodes.size());
            marg_cpt_set.insert(&cpt);
            for (const auto xnode: hard_nodes) {
              marg_cpt_set.insert(evidence[xnode]);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those tensors and their projection
            MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
                _combination_op_,
                _projection_op_);

            _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);

            // there should be only one tensor in new_cpt_list
            if (new_cpt_list.size() != 1) {
              for (const auto pot: new_cpt_list) {
                if (!marg_cpt_set.contains(pot)) delete pot;
              }
              GUM_ERROR(FatalError,
                        "the projection of a tensor containing hard evidence is empty!");
            }
            auto new_pot = const_cast< Tensor< GUM_SCALAR >* >(*(new_cpt_list.begin()));
            auto projected_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*new_pot));
            delete new_pot;

            _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
            _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
          }
        }
      }
    }
  }

  /// put all the CPTs into the cliques when a schedule is used
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_initializeJTCliques_(Schedule& schedule) {
    const auto& bn  = this->BN();
    const DAG&  dag = bn.dag();

    // put all the CPTs of the Bayes net nodes into the cliques
    // here, beware: all the tensors that are defined over some nodes
    // including hard evidence must be projected so that these nodes are
    // removed from the tensor
    const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence      = this->evidence();
    const NodeProperty< Idx >&                         hard_evidence = this->hardEvidence();

    for (const auto node: dag) {
      if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
        const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);

        // get the list of nodes with hard evidence in cpt
        NodeSet     hard_nodes;
        const auto& variables            = cpt.variablesSequence();
        bool        graph_contains_nodes = false;
        for (const auto var: variables) {
          NodeId xnode = bn.nodeId(*var);
          if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
          else if (_graph_.exists(xnode)) graph_contains_nodes = true;
        }

        // if hard_nodes contains hard evidence nodes, perform a projection
        // and insert the result into the appropriate clique, else insert
        // directly cpt into the clique
        if (hard_nodes.empty()) {
          auto sched_cpt = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt, false);
          _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
        } else {
          // marginalize out the hard evidence nodes: if the cpt is defined
          // only over nodes that received hard evidence, do not consider it
          // as a tensor anymore but as a constant
          // TODO substitute constants by 0-dimensional tensors
          if (hard_nodes.size() == variables.size()) {
            Instantiation inst(cpt);
            for (Size i = 0; i < hard_nodes.size(); ++i) {
              inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
            }
            _constants_.insert(node, cpt.get(inst));
          } else {
            // here, we have a CPT defined over some nodes that received hard
            // evidence and other nodes that did not receive it. If none of the
            // latter belong to the graph, then the CPT is useless for inference
            if (!graph_contains_nodes) continue;

            // prepare the projection with a combine and project instance
            gum::VariableSet         hard_variables;
            _ScheduleMultiDimSet_    marg_cpt_set(1 + hard_nodes.size());
            const IScheduleMultiDim* sched_cpt
                = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt, false);
            marg_cpt_set.insert(sched_cpt);

            for (const auto xnode: hard_nodes) {
              const IScheduleMultiDim* pot
                  = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode], false);
              marg_cpt_set.insert(pot);
              hard_variables.insert(&(bn.variable(xnode)));
            }

            // perform the combination of those tensors and their projection
            MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
                _combination_op_,
                _projection_op_);

            _ScheduleMultiDimSet_ new_cpt_list
                = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);

            // there should be only one tensor in new_cpt_list
            if (new_cpt_list.size() != 1) {
              GUM_ERROR(FatalError,
                        "the projection of a tensor containing hard evidence is empty!");
            }
            auto projected_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
                static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
                    *new_cpt_list.begin()));
            const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(projected_pot))
                ->makeResultsPersistent(true);
            _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
            _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
          }
        }
      }
    }
    this->scheduler().execute(schedule);
  }

  /// prepare the inference structures w.r.t. the new targets and evidence
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::updateOutdatedStructure_() {
    // check if a new JT is really needed. If so, create it
    if (_isNewJTNeeded_()) {
      _createNewJT_();
    } else {
      // here, we can answer the next queries without reconstructing all the
      // junction tree. All we need to do is to indicate that we should
      // update the tensors and messages for these queries
      updateOutdatedTensors_();
    }
  }

  /// invalidate all the messages sent downstream of a given arc
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_diffuseMessageInvalidations_(NodeId   from_id,
                                                                    NodeId   to_id,
                                                                    NodeSet& invalidated_cliques) {
    // invalidate the current clique
    invalidated_cliques.insert(to_id);

    // invalidate the current arc
    const Arc arc(from_id, to_id);
    bool&     message_computed = _messages_computed_[arc];
    if (message_computed) {
      message_computed = false;
      _separator_tensors_[arc].clear();
      if (_arc_to_created_tensors_.exists(arc)) {
        _ScheduleMultiDimSet_& arc_created_tensors = _arc_to_created_tensors_[arc];
        for (const auto pot: arc_created_tensors)
          delete pot;
        arc_created_tensors.clear();
      }

      // go on with the diffusion
      for (const auto node_id: _JT_->neighbours(to_id)) {
        if (node_id != from_id) _diffuseMessageInvalidations_(to_id, node_id, invalidated_cliques);
      }
    }
  }

  /// update the tensors stored in the cliques and invalidate outdated
  /// messages
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::updateOutdatedTensors_() {
    // compute the set of CPTs that were projected due to hard evidence and
    // whose hard evidence has changed, so that they need a new projection.
    // By the way, remove these CPTs since they are no longer needed.
    // Here only the values of the hard evidence can have changed (else a
    // fully new join tree would have been computed).
    // Note also that we know that the CPTs still contain some variable(s) after
    // the projection (else they should be constants)

    NodeSet hard_nodes_changed(_hard_ev_nodes_.size());
    for (const auto node: _hard_ev_nodes_)
      if (_evidence_changes_.exists(node)) hard_nodes_changed.insert(node);

    NodeSet     nodes_with_projected_CPTs_changed;
    const auto& bn = this->BN();
    for (auto pot_iter = _node_to_hard_ev_projected_CPTs_.beginSafe();
         pot_iter != _node_to_hard_ev_projected_CPTs_.endSafe();
         ++pot_iter) {
      for (const auto var: bn.cpt(pot_iter.key()).variablesSequence()) {
        if (hard_nodes_changed.contains(bn.nodeId(*var))) {
          nodes_with_projected_CPTs_changed.insert(pot_iter.key());
          delete pot_iter.val();
          _clique_tensors_[_node_to_clique_[pot_iter.key()]].erase(pot_iter.val());
          _node_to_hard_ev_projected_CPTs_.erase(pot_iter);
          break;
        }
      }
    }

    // invalidate all the messages that are no longer correct: start from each of
    // the nodes whose soft evidence has changed and perform a diffusion from
    // the clique into which the soft evidence has been entered, indicating that
    // the messages spreading from this clique are now invalid. At the same time,
    // if there were tensors created on the arcs over which the messages were
    // sent, remove them from memory. For all the cliques that received some
    // projected CPT that should now be changed, do the same.
    NodeSet invalidated_cliques(_JT_->size());
    for (const auto& pair: _evidence_changes_) {
      if (_node_to_clique_.exists(pair.first)) {
        const auto clique = _node_to_clique_[pair.first];
        invalidated_cliques.insert(clique);
        for (const auto neighbor: _JT_->neighbours(clique)) {
          _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
        }
      }
    }

    // now, add to the set of invalidated cliques those that contain projected
    // CPTs that were changed.
    for (const auto node: nodes_with_projected_CPTs_changed) {
      const auto clique = _node_to_clique_[node];
      invalidated_cliques.insert(clique);
      for (const auto neighbor: _JT_->neighbours(clique)) {
        _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
      }
    }

    // now we shall remove all the posteriors that belong to the
    // invalidated cliques. First, cope only with the nodes that did not
    // receive hard evidence since the other nodes do not belong to the
    // join tree
    for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
         ++iter) {
      if (_graph_.exists(iter.key())
          && (invalidated_cliques.exists(_node_to_clique_[iter.key()]))) {
        delete iter.val();
        _target_posteriors_.erase(iter);
      }
    }

    // now cope with the nodes that received hard evidence
    for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
         ++iter) {
      if (hard_nodes_changed.contains(iter.key())) {
        delete iter.val();
        _target_posteriors_.erase(iter);
      }
    }

    // finally, cope with joint targets. Notably, remove the joint posteriors whose
    // nodes have all received changed evidence
    for (auto iter = _joint_target_posteriors_.beginSafe();
         iter != _joint_target_posteriors_.endSafe();
         ++iter) {
      if (invalidated_cliques.exists(_joint_target_to_clique_[iter.key()])) {
        delete iter.val();
        _joint_target_posteriors_.erase(iter);
      } else {
        // check for sets in which all nodes have received evidence
        bool has_unevidenced_node = false;
        for (const auto node: iter.key()) {
          if (!hard_nodes_changed.exists(node)) {
            has_unevidenced_node = true;
            break;
          }
        }
        if (!has_unevidenced_node) {
          delete iter.val();
          _joint_target_posteriors_.erase(iter);
        }
      }
    }

    // remove all the evidence that were entered into _node_to_soft_evidence_
    // and _clique_tensors_ and add the new soft ones
    for (const auto& pot_pair: _node_to_soft_evidence_) {
      delete pot_pair.second;
      _clique_tensors_[_node_to_clique_[pot_pair.first]].erase(pot_pair.second);
    }
    _node_to_soft_evidence_.clear();

    const auto& evidence = this->evidence();
    for (const auto node: this->softEvidenceNodes()) {
      auto ev_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node], false);
      _node_to_soft_evidence_.insert(node, ev_pot);
      _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
    }

    // Now add the projections of the CPTs due to newly changed hard evidence:
    // if we are performing updateOutdatedTensors_, this means that the
    // set of nodes that received hard evidence has not changed, only
    // their instantiations can have changed. So, if there is an entry
    // for node in _constants_, there will still be such an entry after
    // performing the new projections. Idem for _node_to_hard_ev_projected_CPTs_
    if (_use_schedules_) {
      Schedule schedule;
      for (const auto node: nodes_with_projected_CPTs_changed) {
        // perform the projection with a combine and project instance
        const Tensor< GUM_SCALAR >& cpt       = bn.cpt(node);
        const auto&                 variables = cpt.variablesSequence();
        _ScheduleMultiDimSet_       marg_cpt_set;
        const auto sched_cpt = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt, false);
        marg_cpt_set.insert(sched_cpt);

        gum::VariableSet hard_variables;
        for (const auto var: variables) {
          NodeId xnode = bn.nodeId(*var);
          if (_hard_ev_nodes_.exists(xnode)) {
            const auto pot = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode], false);
            marg_cpt_set.insert(pot);
            hard_variables.insert(var);
          }
        }

        // perform the combination of those tensors and their projection
        MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
            _combination_op_,
            _projection_op_);

        _ScheduleMultiDimSet_ new_cpt_list
            = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);

        // there should be only one tensor in new_cpt_list
        if (new_cpt_list.size() != 1) {
          GUM_ERROR(FatalError,
                    "the projection of a tensor containing hard evidence is empty!");
        }
        auto projected_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
            static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*new_cpt_list.begin()));
        const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(projected_pot))
            ->makeResultsPersistent(true);
        _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
        _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
      }
      this->scheduler().execute(schedule);
    } else {
      for (const auto node: nodes_with_projected_CPTs_changed) {
        // perform the projection with a combine and project instance
        const Tensor< GUM_SCALAR >& cpt       = bn.cpt(node);
        const auto&                 variables = cpt.variablesSequence();
        _TensorSet_                 marg_cpt_set(1 + variables.size());
        marg_cpt_set.insert(&cpt);

        gum::VariableSet hard_variables;
        for (const auto var: variables) {
          NodeId xnode = bn.nodeId(*var);
          if (_hard_ev_nodes_.exists(xnode)) {
            marg_cpt_set.insert(evidence[xnode]);
            hard_variables.insert(var);
          }
        }

        // perform the combination of those tensors and their projection
        MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
            _combination_op_,
            _projection_op_);

        _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);

        // there should be only one tensor in new_cpt_list
        if (new_cpt_list.size() != 1) {
          GUM_ERROR(FatalError,
                    "the projection of a tensor containing hard evidence is empty!");
        }
        Tensor< GUM_SCALAR >* sched_pot
            = const_cast< Tensor< GUM_SCALAR >* >(*new_cpt_list.begin());
        auto projected_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*sched_pot));
        delete sched_pot;
        _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
        _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
      }
    }

    // update the constants
    const auto& hard_evidence = this->hardEvidence();
    for (auto& node_cst: _constants_) {
      const Tensor< GUM_SCALAR >& cpt       = bn.cpt(node_cst.first);
      const auto&                 variables = cpt.variablesSequence();
      Instantiation               inst(cpt);
      for (const auto var: variables) {
        inst.chgVal(*var, hard_evidence[bn.nodeId(*var)]);
      }
      node_cst.second = cpt.get(inst);
    }

    // indicate that all changes have been performed
    _evidence_changes_.clear();
  }

  /// compute a root for each connected component of _JT_
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_computeJoinTreeRoots_() {
    // get the set of cliques in which we can find the targets and joint_targets.
    // Due to hard evidence, the cliques related to a given target node
    // might not exist, hence the try..catch.
    NodeSet clique_targets;
    for (const auto node: this->targets()) {
      try {
        clique_targets.insert(_node_to_clique_[node]);
      } catch (Exception const&) {}
    }
    for (const auto& set: this->jointTargets()) {
      try {
        clique_targets.insert(_joint_target_to_clique_[set]);
      } catch (Exception const&) {}
    }

    // put in a vector these cliques and their sizes
    std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
    const auto&                              bn = this->BN();
    std::size_t                              i  = 0;
    for (const auto clique_id: clique_targets) {
      const auto& clique   = _JT_->clique(clique_id);
      Size        dom_size = 1;
      for (const auto node: clique) {
        dom_size *= bn.variable(node).domainSize();
      }
      possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
      ++i;
    }

    // sort the cliques by increasing domain size
    std::sort(possible_roots.begin(),
              possible_roots.end(),
              [](const std::pair< NodeId, Size >& a, const std::pair< NodeId, Size >& b) -> bool {
                return a.second < b.second;
              });

    // pick up the clique with the smallest size in each connected component
    NodeProperty< bool >                  marked = _JT_->nodesPropertyFromVal(false);
    std::function< void(NodeId, NodeId) > diffuse_marks
        = [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
            if (!marked[node]) {
              marked[node] = true;
              for (const auto neigh: _JT_->neighbours(node))
                if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
            }
          };
    _roots_.clear();
    for (const auto& xclique: possible_roots) {
      NodeId clique = xclique.first;
      if (!marked[clique]) {
        _roots_.insert(clique);
        diffuse_marks(clique, clique);
      }
    }
  }

  // find the tensors d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_findRelevantTensorsGetAll_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet&                kept_vars) {}

  // find the tensors d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet&                kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids(kept_vars.size());
    const auto& bn = this->BN();
    for (const auto var: kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of tensors d-connected with the kept variables
    NodeSet requisite_nodes;
    BayesBall::requisiteNodes(bn.dag(),
                              kept_ids,
                              this->hardEvidenceNodes(),
                              this->softEvidenceNodes(),
                              requisite_nodes);
    for (auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
      const Sequence< const DiscreteVariable* >& vars  = (*iter)->variablesSequence();
      bool                                       found = false;
      for (const auto var: vars) {
        if (requisite_nodes.exists(bn.nodeId(*var))) {
          found = true;
          break;
        }
      }

      if (!found) { pot_list.erase(iter); }
    }
  }

  // find the tensors d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet&                kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids(kept_vars.size());
    const auto& bn = this->BN();
    for (const auto var: kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of tensors d-connected with the kept variables
    BayesBall::relevantTensors(bn,
                               kept_ids,
                               this->hardEvidenceNodes(),
                               this->softEvidenceNodes(),
                               pot_list);
  }

  // find the tensors d-connected to a set of variables
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet&                kept_vars) {
    // find the node ids of the kept variables
    NodeSet     kept_ids(kept_vars.size());
    const auto& bn = this->BN();
    for (const auto var: kept_vars) {
      kept_ids.insert(bn.nodeId(*var));
    }

    // determine the set of tensors d-connected with the kept variables
    dSeparationAlgorithm dsep;
    dsep.relevantTensors(bn,
                         kept_ids,
                         this->hardEvidenceNodes(),
                         this->softEvidenceNodes(),
                         pot_list);
  }

  // dispatch to the relevant-tensors finder selected by the user
  template < typename GUM_SCALAR >
  void LazyPropagation< GUM_SCALAR >::_findRelevantTensorsXX_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet&                kept_vars) {
    switch (_find_relevant_tensor_type_) {
      case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
        _findRelevantTensorsWithdSeparation2_(pot_list, kept_vars);
        break;

      case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
        _findRelevantTensorsWithdSeparation_(pot_list, kept_vars);
        break;

      case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
        _findRelevantTensorsWithdSeparation3_(pot_list, kept_vars);
        break;

      case RelevantTensorsFinderType::FIND_ALL :
        _findRelevantTensorsGetAll_(pot_list, kept_vars);
        break;

      default : GUM_ERROR(FatalError, "not implemented yet")
    }
  }
1438
1439 // remove barren variables using schedules
1440 template < typename GUM_SCALAR >
1441 Set< const IScheduleMultiDim* >
1442 LazyPropagation< GUM_SCALAR >::_removeBarrenVariables_(Schedule& schedule,
1443 _ScheduleMultiDimSet_& pot_list,
1444 gum::VariableSet& del_vars) {
1445 // remove from del_vars the variables that received some evidence:
1446 // only those that did not receive evidence can be barren variables
1447 gum::VariableSet the_del_vars = del_vars;
1448 for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe(); ++iter) {
1449 NodeId id = this->BN().nodeId(**iter);
1450 if (this->hardEvidenceNodes().exists(id) || this->softEvidenceNodes().exists(id)) {
1451 the_del_vars.erase(iter);
1452 }
1453 }
1454
1455 // assign to each random variable the set of tensors that contain it
1456 HashTable< const DiscreteVariable*, _ScheduleMultiDimSet_ > var2pots(the_del_vars.size());
1457 _ScheduleMultiDimSet_ empty_pot_set;
1458 for (const auto pot: pot_list) {
1459 const auto& vars = pot->variablesSequence();
1460 for (const auto var: vars) {
1461 if (the_del_vars.exists(var)) {
1462 if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1463 var2pots[var].insert(pot);
1464 }
1465 }
1466 }
1467
1468 // each variable with only one tensor is necessarily a barren variable
1469 // assign to each tensor with barren nodes its set of barren variables
1470 HashTable< const IScheduleMultiDim*, gum::VariableSet > pot2barren_var;
1471 gum::VariableSet empty_var_set;
1472 for (const auto& elt: var2pots) {
1473 if (elt.second.size() == 1) { // here we have a barren variable
1474 const IScheduleMultiDim* pot = *(elt.second.begin());
1475 if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
1476 pot2barren_var[pot].insert(elt.first); // insert the barren variable
1477 }
1478 }
1479
1480 // for each tensor with barren variables, marginalize them.
1481 // if the tensor has only barren variables, simply remove them from the
1482 // set of tensors, else just project the tensor
1483 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1484 _ScheduleMultiDimSet_ projected_pots;
1485 for (const auto& elt: pot2barren_var) {
 1486 // remove the current tensor from pot_list since we will replace it anyway
1487 const IScheduleMultiDim* pot = elt.first;
1488 pot_list.erase(pot);
1489
 1490 // check whether we need to add a new projected tensor (i.e., whether
 1491 // the tensor also contains non-barren variables)
1492 if (pot->variablesSequence().size() != elt.second.size()) {
1493 const IScheduleMultiDim* new_pot = projector.schedule(schedule, pot, elt.second);
1494 // here, there is no need to enforce that new_pot is persistent since,
1495 // if this is needed, the function that called _removeBarrenVariables_ will
1496 // do it
1497 pot_list.insert(new_pot);
1498 projected_pots.insert(new_pot);
1499 }
1500 }
1501
1502 return projected_pots;
1503 }
1504
1505 // remove barren variables directly without schedules
1506 template < typename GUM_SCALAR >
1507 Set< const Tensor< GUM_SCALAR >* >
1508 LazyPropagation< GUM_SCALAR >::_removeBarrenVariables_(_TensorSet_& pot_list,
1509 gum::VariableSet& del_vars) {
 1510 // remove from a copy of del_vars the variables that received evidence:
 1511 // only those that did not receive evidence can be barren variables
1512 gum::VariableSet the_del_vars = del_vars;
1513 for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe(); ++iter) {
1514 NodeId id = this->BN().nodeId(**iter);
1515 if (this->hardEvidenceNodes().exists(id) || this->softEvidenceNodes().exists(id)) {
1516 the_del_vars.erase(iter);
1517 }
1518 }
1519
1520 // assign to each random variable the set of tensors that contain it
1521 HashTable< const DiscreteVariable*, _TensorSet_ > var2pots;
1522 _TensorSet_ empty_pot_set;
1523 for (const auto pot: pot_list) {
1524 const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
1525 for (const auto var: vars) {
1526 if (the_del_vars.exists(var)) {
1527 if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1528 var2pots[var].insert(pot);
1529 }
1530 }
1531 }
1532
 1533 // each variable belonging to only one tensor is necessarily a barren variable;
 1534 // assign to each tensor with barren variables its set of barren variables
1535 HashTable< const Tensor< GUM_SCALAR >*, gum::VariableSet > pot2barren_var;
1536 gum::VariableSet empty_var_set;
1537 for (const auto& elt: var2pots) {
1538 if (elt.second.size() == 1) { // here we have a barren variable
1539 const Tensor< GUM_SCALAR >* pot = *(elt.second.begin());
1540 if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
1541 pot2barren_var[pot].insert(elt.first); // insert the barren variable
1542 }
1543 }
1544
 1545 // for each tensor with barren variables, marginalize these variables out.
 1546 // if the tensor contains only barren variables, simply remove it from the
 1547 // set of tensors; otherwise project the barren variables out of the tensor
1548 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1549 _TensorSet_ projected_pots;
1550 for (const auto& elt: pot2barren_var) {
 1551 // remove the current tensor from pot_list since we will replace it anyway
1552 const Tensor< GUM_SCALAR >* pot = elt.first;
1553 pot_list.erase(pot);
1554
 1555 // check whether we need to add a new projected tensor (i.e., whether
 1556 // the tensor also contains non-barren variables)
1557 if (pot->variablesSequence().size() != elt.second.size()) {
1558 const Tensor< GUM_SCALAR >* new_pot = projector.execute(*pot, elt.second);
1559 pot_list.insert(new_pot);
1560 projected_pots.insert(new_pot);
1561 }
1562 }
1563
1564 return projected_pots;
1565 }
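// Editor's note: in both variants above, a variable is flagged as barren when
// it must be eliminated (it belongs to del_vars), received no evidence, and
// appears in exactly one tensor: summing it out then costs a single
// projection. For instance, in the chain A -> B -> C with target A and no
// evidence on B or C, C is barren since sum_C P(C|B) = 1, so P(C|B) can be
// dropped without changing the posterior of A.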
1566
1567 // performs the collect phase of Lazy Propagation using schedules
1568 template < typename GUM_SCALAR >
1569 INLINE void
1570 LazyPropagation< GUM_SCALAR >::_collectMessage_(Schedule& schedule, NodeId id, NodeId from) {
1571 for (const auto other: _JT_->neighbours(id)) {
1572 if ((other != from) && !_messages_computed_[Arc(other, id)])
1573 _collectMessage_(schedule, other, id);
1574 }
1575
1576 if ((id != from) && !_messages_computed_[Arc(id, from)]) {
1577 _produceMessage_(schedule, id, from);
1578 }
1579 }
1580
1581 // performs the collect phase of Lazy Propagation without schedules
1582 template < typename GUM_SCALAR >
1583 INLINE void LazyPropagation< GUM_SCALAR >::_collectMessage_(NodeId id, NodeId from) {
1584 for (const auto other: _JT_->neighbours(id)) {
1585 if ((other != from) && !_messages_computed_[Arc(other, id)]) _collectMessage_(other, id);
1586 }
1587
1588 if ((id != from) && !_messages_computed_[Arc(id, from)]) { _produceMessage_(id, from); }
1589 }
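// Editor's note: _collectMessage_(id, id) performs a depth-first traversal of
// the join tree. On a chain of cliques C1 - C2 - C3, a collect towards C2
// first recurses into C1 and C3 and then produces the messages C1->C2 and
// C3->C2; messages already flagged in _messages_computed_ are not recomputed.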
1590
1591 // remove variables del_vars from the list of tensors pot_list
1592 template < typename GUM_SCALAR >
1593 Set< const IScheduleMultiDim* >
1594 LazyPropagation< GUM_SCALAR >::_marginalizeOut_(Schedule& schedule,
1595 Set< const IScheduleMultiDim* > pot_list,
1596 gum::VariableSet& del_vars,
1597 gum::VariableSet& kept_vars) {
 1598 // d-separation analysis could prune the tensors to be combined (call currently disabled):
1599 // _findRelevantTensorsXX_(pot_list, kept_vars);
1600
1601 // if pot list is empty, do nothing. This may happen when there are only barren variables
1602 if (pot_list.empty()) { return _ScheduleMultiDimSet_(); }
1603
1604 // now, let's guarantee that all the tensors to be combined and projected
1605 // belong to the schedule
1606 for (const auto pot: pot_list) {
1607 if (!schedule.existsScheduleMultiDim(pot->id())) schedule.emplaceScheduleMultiDim(*pot);
1608 }
1609
1610 // remove the tensors corresponding to barren variables if we want
1611 // to exploit barren nodes
1612 _ScheduleMultiDimSet_ barren_projected_tensors;
1613 if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
1614 barren_projected_tensors = _removeBarrenVariables_(schedule, pot_list, del_vars);
1615 }
1616
1617 // Combine and project the tensors
1618 _ScheduleMultiDimSet_ new_pot_list;
1619 if (pot_list.size() == 1) { // only one tensor, so just project it
1620 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1621 auto xpot = projector.schedule(schedule, *(pot_list.begin()), del_vars);
1622 new_pot_list.insert(xpot);
1623 } else if (pot_list.size() > 1) {
1624 // create a combine and project operator that will perform the
1625 // marginalization
1626 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
1627 _projection_op_);
1628 new_pot_list = combine_and_project.schedule(schedule, pot_list, del_vars);
1629 }
1630
1631 // remove all the tensors that were created due to projections of
1632 // barren nodes and that are not part of the new_pot_list: these
1633 // tensors were just temporary tensors
1634 for (auto barren_pot: barren_projected_tensors) {
1635 if (!new_pot_list.exists(barren_pot))
1636 schedule.emplaceDeletion(
1637 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >& >(*barren_pot));
1638 }
1639
1640 return new_pot_list;
1641 }
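// Editor's note: with schedules, the projections and combinations above are
// only *recorded* into the Schedule as operations; nothing is computed until
// this->scheduler().execute(schedule) is called, which lets the scheduler
// order (and possibly parallelize) the operations.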
1642
1643 // remove variables del_vars from the list of tensors pot_list
1644 template < typename GUM_SCALAR >
1645 Set< const IScheduleMultiDim* >
1646 LazyPropagation< GUM_SCALAR >::_marginalizeOut_(Set< const IScheduleMultiDim* >& pot_list,
1647 gum::VariableSet& del_vars,
1648 gum::VariableSet& kept_vars) {
 1649 // if pot list is empty, do nothing. This may happen when there are only barren variables
1650 if (pot_list.empty()) { return _ScheduleMultiDimSet_(); }
1651
1652 _TensorSet_ xpot_list(pot_list.size());
1653 for (auto pot: pot_list)
1654 xpot_list.insert(
1655 &(static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot)->multiDim()));
1656
 1657 // d-separation analysis could prune the tensors to be combined (call currently disabled):
1658 // _findRelevantTensorsXX_(pot_list, kept_vars);
1659
1660 // remove the tensors corresponding to barren variables if we want
1661 // to exploit barren nodes
1662 _TensorSet_ barren_projected_tensors;
1663 if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
1664 barren_projected_tensors = _removeBarrenVariables_(xpot_list, del_vars);
1665 }
1666
1667 // Combine and project the remaining tensors
1668 _TensorSet_ xnew_pot_list;
1669 _ScheduleMultiDimSet_ new_pot_list;
1670 if (xpot_list.size() == 1) {
1671 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1672 auto xpot = projector.execute(**(xpot_list.begin()), del_vars);
1673 ScheduleMultiDim< Tensor< GUM_SCALAR > >* pot;
1674 if (xpot_list.contains(xpot))
1675 pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*xpot, false);
1676 else {
1677 pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(
1678 std::move(const_cast< Tensor< GUM_SCALAR >& >(*xpot)));
1679 delete xpot;
1680 }
1681 new_pot_list.insert(pot);
1682 } else if (xpot_list.size() > 1) {
1683 // create a combine and project operator that will perform the
1684 // marginalization
1685 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
1686 _projection_op_);
1687 xnew_pot_list = combine_and_project.execute(xpot_list, del_vars);
1688
1689 for (auto xpot: xnew_pot_list) {
1690 ScheduleMultiDim< Tensor< GUM_SCALAR > >* pot;
1691 if (xpot_list.contains(xpot))
1692 pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*xpot, false);
1693 else {
1694 pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(
1695 std::move(const_cast< Tensor< GUM_SCALAR >& >(*xpot)));
1696 delete xpot;
1697 }
1698 new_pot_list.insert(pot);
1699 }
1700 }
1701
1702 // remove all the tensors that were created due to projections of
1703 // barren nodes and that are not part of the new_pot_list: these
1704 // tensors were just temporary tensors
1705 for (const auto barren_pot: barren_projected_tensors) {
1706 if (!xnew_pot_list.exists(barren_pot)) delete barren_pot;
1707 }
1708
1709 return new_pot_list;
1710 }
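// Editor's note: both _marginalizeOut_ variants compute a (possibly factored)
// representation of
//
//   sum over del_vars of [ product of the tensors phi in pot_list ]
//
// i.e., the combination of all the tensors in pot_list followed by the
// summation over del_vars; only the execution strategy (scheduled versus
// immediate) differs between the two variants.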
1711
1712 // creates the message sent by clique from_id to clique to_id
1713 template < typename GUM_SCALAR >
1714 void LazyPropagation< GUM_SCALAR >::_produceMessage_(Schedule& schedule,
1715 NodeId from_id,
1716 NodeId to_id) {
1717 // get the tensors of the clique
1718 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[from_id];
1719
1720 // add the messages sent by adjacent nodes to from_id.
1721 for (const auto other_id: _JT_->neighbours(from_id)) {
1722 if (other_id != to_id) pot_list += _separator_tensors_[Arc(other_id, from_id)];
1723 }
1724
 1725 // get the set of variables that need to be removed from the tensors
1726 const NodeSet& from_clique = _JT_->clique(from_id);
1727 const NodeSet& separator = _JT_->separator(from_id, to_id);
1728 gum::VariableSet del_vars(from_clique.size());
1729 gum::VariableSet kept_vars(separator.size());
1730 const auto& bn = this->BN();
1731
1732 for (const auto node: from_clique) {
1733 if (!separator.contains(node)) {
1734 del_vars.insert(&(bn.variable(node)));
1735 } else {
1736 kept_vars.insert(&(bn.variable(node)));
1737 }
1738 }
1739
1740 // pot_list now contains all the tensors to multiply and marginalize
1741 // => combine the messages
1742 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
1743
 1744 // keep track of the tensors that were newly created for this message:
 1745 // they do not belong to the original clique tensors, so they must be
 1746 // deleted when the message is invalidated or recomputed. We also make
 1747 // the ScheduleMultiDims that will store them persistent, so that they
 1748 // survive the execution of the schedule
1749 const Arc arc(from_id, to_id);
1750
1751 if (!_arc_to_created_tensors_.exists(arc))
1752 _arc_to_created_tensors_.insert(arc, _ScheduleMultiDimSet_());
1753
1754 for (auto iter = new_pot_list.beginSafe(); iter != new_pot_list.endSafe(); ++iter) {
1755 const auto pot = *iter;
1756
1757 if (!pot_list.exists(pot)) {
1758 _arc_to_created_tensors_[arc].insert(pot);
1759
1760 // do not forget to make the ScheduleMultiDim persistent
1761 auto op = schedule.scheduleMultiDimCreator(pot);
1762 if (op != nullptr) const_cast< ScheduleOperator* >(op)->makeResultsPersistent(true);
1763 }
1764 }
1765
1766 _separator_tensors_[arc] = std::move(new_pot_list);
1767 _messages_computed_[arc] = true;
1768 }
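// Editor's note: writing S for the separator between clique(from) and
// clique(to), the message assembled above is
//
//   phi_{from->to}(S) = sum over (clique(from) \ S) of
//       [ product of the tensors stored in clique(from)
//         * product of the messages phi_{k->from}, for neighbours k != to ]
//
// i.e., essentially a Shafer-Shenoy message, here kept in factored form
// (new_pot_list may contain several tensors rather than a single one).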
1769
1770 // creates the message sent by clique from_id to clique to_id
1771 template < typename GUM_SCALAR >
1772 void LazyPropagation< GUM_SCALAR >::_produceMessage_(NodeId from_id, NodeId to_id) {
1773 // get the tensors of the clique
1774 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[from_id];
1775
1776 // add the messages sent by adjacent nodes to from_id.
1777 for (const auto other_id: _JT_->neighbours(from_id)) {
1778 if (other_id != to_id) pot_list += _separator_tensors_[Arc(other_id, from_id)];
1779 }
1780
 1781 // get the set of variables that need to be removed from the tensors
1782 const NodeSet& from_clique = _JT_->clique(from_id);
1783 const NodeSet& separator = _JT_->separator(from_id, to_id);
1784 gum::VariableSet del_vars(from_clique.size());
1785 gum::VariableSet kept_vars(separator.size());
1786 const auto& bn = this->BN();
1787
1788 for (const auto node: from_clique) {
1789 if (!separator.contains(node)) {
1790 del_vars.insert(&(bn.variable(node)));
1791 } else {
1792 kept_vars.insert(&(bn.variable(node)));
1793 }
1794 }
1795
1796 // pot_list now contains all the tensors to multiply and marginalize
1797 // => combine the messages
1798 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
1799
 1800 // keep track of the tensors that were newly created for this message:
 1801 // they do not belong to the original clique tensors, so they are stored
 1802 // into _arc_to_created_tensors_ in order to be deleted when the message
 1803 // is invalidated or recomputed, which avoids leaking the tensors
 1804 // produced by the combinations and projections above
1805 const Arc arc(from_id, to_id);
1806
1807 if (!_arc_to_created_tensors_.exists(arc))
1808 _arc_to_created_tensors_.insert(arc, _ScheduleMultiDimSet_());
1809
1810 for (const auto pot: new_pot_list) {
1811 if (!pot_list.exists(pot)) { _arc_to_created_tensors_[arc].insert(pot); }
1812 }
1813
1814 _separator_tensors_[arc] = std::move(new_pot_list);
1815 _messages_computed_[arc] = true;
1816 }
1817
1818 // performs a whole inference
1819 template < typename GUM_SCALAR >
1820 INLINE void LazyPropagation< GUM_SCALAR >::makeInference_() {
1821 if (_use_schedules_) {
1822 Schedule schedule;
1823
1824 // collect messages for all single targets
1825 for (const auto node: this->targets()) {
 1826 // perform only collects in the join tree for nodes that have
 1827 // not received hard evidence (those that received hard evidence were
 1828 // not included in the join tree, for speed-up reasons)
1829 if (_graph_.exists(node)) {
1830 _collectMessage_(schedule, _node_to_clique_[node], _node_to_clique_[node]);
1831 }
1832 }
1833
 1834 // collect messages for all set targets
 1835 // by iterating over _joint_target_to_clique_, we ensure that the cliques
 1836 // that are referenced belong to the join tree (even if some of the nodes
 1837 // in their associated joint_target do not belong to _graph_)
1838 for (const auto& set: _joint_target_to_clique_)
1839 _collectMessage_(schedule, set.second, set.second);
1840
 1841 // actually perform the computations
1842 this->scheduler().execute(schedule);
1843 } else {
1844 // collect messages for all single targets
1845 for (const auto node: this->targets()) {
 1846 // perform only collects in the join tree for nodes that have
 1847 // not received hard evidence (those that received hard evidence were
 1848 // not included in the join tree, for speed-up reasons)
1849 if (_graph_.exists(node)) {
1850 _collectMessage_(_node_to_clique_[node], _node_to_clique_[node]);
1851 }
1852 }
1853
 1854 // collect messages for all set targets
 1855 // by iterating over _joint_target_to_clique_, we ensure that the cliques
 1856 // that are referenced belong to the join tree (even if some of the nodes
 1857 // in their associated joint_target do not belong to _graph_)
1858 for (const auto& set: _joint_target_to_clique_)
1859 _collectMessage_(set.second, set.second);
1860 }
1861 }
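// Editor's usage sketch (hypothetical names; it assumes the public wrappers
// addEvidence(), makeInference() and posterior() inherited from the inference
// base classes, and a BayesNet< double > bn built elsewhere):
//
//   gum::LazyPropagation< double > ie(&bn);
//   ie.addEvidence(bn.idFromName("X"), 0);    // hard evidence: X = 0
//   ie.makeInference();                       // runs makeInference_() above
//   const auto& pY = ie.posterior(bn.idFromName("Y"));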
 1862
 1863 // returns a fresh unnormalized joint posterior of a given node
 1864 template < typename GUM_SCALAR >
1865 Tensor< GUM_SCALAR >* LazyPropagation< GUM_SCALAR >::unnormalizedJointPosterior_(NodeId id) {
1866 if (_use_schedules_) {
1867 Schedule schedule;
1868 return _unnormalizedJointPosterior_(schedule, id);
1869 } else {
1870 return _unnormalizedJointPosterior_(id);
1871 }
1872 }
 1873
 1874 // returns a fresh unnormalized joint posterior of a given node, using schedules
 1875 template < typename GUM_SCALAR >
1876 Tensor< GUM_SCALAR >*
1877 LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule, NodeId id) {
1878 const auto& bn = this->BN();
1879
 1880 // hard evidence does not belong to the join tree
 1881 // TODO: check for sets of inconsistent hard evidence
1882 if (this->hardEvidenceNodes().contains(id)) {
1883 return new Tensor< GUM_SCALAR >(*(this->evidence()[id]));
1884 }
1885
1886 auto& scheduler = this->scheduler();
1887
1888 // if we still need to perform some inference task, do it (this should
1889 // already have been done by makeInference_)
1890 const NodeId clique_of_id = _node_to_clique_[id];
1891 _collectMessage_(schedule, clique_of_id, clique_of_id);
1892
1893 // now we just need to create the product of the tensors of the clique
1894 // containing id with the messages received by this clique and
1895 // marginalize out all variables except id
1896 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[clique_of_id];
1897
1898 // add the messages sent by adjacent nodes to targetClique
1899 for (const auto other: _JT_->neighbours(clique_of_id))
1900 pot_list += _separator_tensors_[Arc(other, clique_of_id)];
1901
 1902 // get the set of variables that need to be removed from the tensors
1903 const NodeSet& nodes = _JT_->clique(clique_of_id);
1904 gum::VariableSet kept_vars{&(bn.variable(id))};
1905 gum::VariableSet del_vars(nodes.size());
1906 for (const auto node: nodes) {
1907 if (node != id) del_vars.insert(&(bn.variable(node)));
1908 }
1909
1910 // pot_list now contains all the tensors to multiply and marginalize
1911 // => combine the messages
1912 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
1913 Tensor< GUM_SCALAR >* joint = nullptr;
1914 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot = nullptr;
1915
1916 if (new_pot_list.size() == 0) {
1917 joint = new Tensor< GUM_SCALAR >;
1918 for (const auto var: kept_vars)
1919 *joint << *var;
1920 } else {
1921 if (new_pot_list.size() == 1) {
1922 scheduler.execute(schedule);
1923 resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1924 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*new_pot_list.begin()));
1925 } else {
1926 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1927 const IScheduleMultiDim* pot = fast_combination.schedule(schedule, new_pot_list);
1928 scheduler.execute(schedule);
1929 resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1930 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot));
1931 }
1932
1933 // if resulting_pot already existed, create a copy, so that we can put it into
1934 // the _target_posteriors_ property
1935 if (pot_list.exists(resulting_pot)) {
1936 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
1937 } else {
1938 joint = resulting_pot->exportMultiDim();
1939 }
1940 }
1941
 1942 // check that the joint posterior is not a null vector: a null vector
 1943 // would indicate that some pieces of hard evidence are incompatible
 1944 // (their joint probability is equal to 0)
1945 bool nonzero_found = false;
1946 for (Instantiation inst(*joint); !inst.end(); ++inst) {
1947 if (joint->get(inst)) {
1948 nonzero_found = true;
1949 break;
1950 }
1951 }
1952 if (!nonzero_found) {
1953 // remove joint from memory to avoid memory leaks
 1954 delete joint;
 1955 GUM_ERROR(IncompatibleEvidence,
1956 "some evidence entered into the Bayes "
1957 "net are incompatible (their joint proba = 0)");
1958 }
1959 return joint;
1960 }
 1961
 1962 // returns a fresh unnormalized joint posterior of a given node, without schedules
 1963 template < typename GUM_SCALAR >
1964 Tensor< GUM_SCALAR >* LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior_(NodeId id) {
1965 const auto& bn = this->BN();
1966
 1967 // hard evidence does not belong to the join tree
 1968 // TODO: check for sets of inconsistent hard evidence
1969 if (this->hardEvidenceNodes().contains(id)) {
1970 return new Tensor< GUM_SCALAR >(*(this->evidence()[id]));
1971 }
1972
1973 // if we still need to perform some inference task, do it (this should
1974 // already have been done by makeInference_)
1975 NodeId clique_of_id = _node_to_clique_[id];
1976 _collectMessage_(clique_of_id, clique_of_id);
1977
1978 // now we just need to create the product of the tensors of the clique
1979 // containing id with the messages received by this clique and
1980 // marginalize out all variables except id
1981 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[clique_of_id];
1982
1983 // add the messages sent by adjacent nodes to targetClique
1984 for (const auto other: _JT_->neighbours(clique_of_id))
1985 pot_list += _separator_tensors_[Arc(other, clique_of_id)];
1986
 1987 // get the set of variables that need to be removed from the tensors
1988 const NodeSet& nodes = _JT_->clique(clique_of_id);
1989 gum::VariableSet kept_vars{&(bn.variable(id))};
1990 gum::VariableSet del_vars(nodes.size());
1991 for (const auto node: nodes) {
1992 if (node != id) del_vars.insert(&(bn.variable(node)));
1993 }
1994
1995 // pot_list now contains all the tensors to multiply and marginalize
1996 // => combine the messages
1997 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
1998 Tensor< GUM_SCALAR >* joint = nullptr;
1999
2000 if (new_pot_list.size() == 0) {
2001 joint = new Tensor< GUM_SCALAR >;
2002 for (const auto var: kept_vars)
2003 *joint << *var;
2004 } else if (new_pot_list.size() == 1) {
2005 auto sched_joint = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2006 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*(new_pot_list.begin())));
2007
 2008 // if sched_joint already existed, create a copy, so that we can put it
 2009 // into the _target_posteriors_ property
2010 if (pot_list.exists(sched_joint)) {
2011 joint = new Tensor< GUM_SCALAR >(sched_joint->multiDim());
2012 } else {
2013 joint = sched_joint->exportMultiDim();
2014
 2015 // delete the ScheduleMultiDim wrapper now and clear new_pot_list so
 2016 // that the deletion loop below does not delete it a second time
2017 delete sched_joint;
2018 new_pot_list.clear();
2019 }
2020 } else {
2021 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2022
2023 // get the tensors stored into the ScheduleMultiDims
2024 _TensorSet_ xnew_pot_list(new_pot_list.size());
2025 for (auto xpot: new_pot_list) {
2026 xnew_pot_list.insert(
2027 &(static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(xpot)->multiDim()));
2028 }
2029
2030 joint = fast_combination.execute(xnew_pot_list);
2031 }
2032
 2033 // remove the tensors that were created in new_pot_list.
 2034 // note that, if joint was the result of _marginalizeOut_, it has been copied,
 2035 // so we can safely remove all the elements of new_pot_list
2036 for (const auto pot: new_pot_list)
2037 if (!pot_list.exists(pot)) delete pot;
2038
 2039 // check that the joint posterior is not a null vector: a null vector
 2040 // would indicate that some pieces of hard evidence are incompatible
 2041 // (their joint probability is equal to 0)
2042 bool nonzero_found = false;
2043 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2044 if (joint->get(inst)) {
2045 nonzero_found = true;
2046 break;
2047 }
2048 }
2049 if (!nonzero_found) {
2050 // remove joint from memory to avoid memory leaks
 2051 delete joint;
 2052 GUM_ERROR(IncompatibleEvidence,
2053 "some evidence entered into the Bayes "
2054 "net are incompatible (their joint proba = 0)");
2055 }
2056 return joint;
2057 }
 2058
 2059 // returns the posterior of a given node
 2060 template < typename GUM_SCALAR >
2061 const Tensor< GUM_SCALAR >& LazyPropagation< GUM_SCALAR >::posterior_(NodeId id) {
2062 // check if we have already computed the posterior
2063 if (_target_posteriors_.exists(id)) { return *(_target_posteriors_[id]); }
2064
2065 // compute the joint posterior and normalize
2066 auto joint = unnormalizedJointPosterior_(id);
2067 if (joint->sum() != 1) // hard test for ReadOnly CPT (as aggregator)
2068 joint->normalize();
2069 _target_posteriors_.insert(id, joint);
2070
2071 return *joint;
2072 }
2073
 2074 // returns the joint a posteriori proba of a given set of nodes
2075 template < typename GUM_SCALAR >
2076 Tensor< GUM_SCALAR >*
2077 LazyPropagation< GUM_SCALAR >::unnormalizedJointPosterior_(const NodeSet& set) {
2078 if (_use_schedules_) {
2079 Schedule schedule;
2080 return _unnormalizedJointPosterior_(schedule, set);
2081 } else {
2082 return _unnormalizedJointPosterior_(set);
2083 }
2084 }
2085
 2086 // returns the joint a posteriori proba of a given set of nodes, using schedules
2087 template < typename GUM_SCALAR >
2088 Tensor< GUM_SCALAR >*
2089 LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule,
2090 const NodeSet& set) {
 2091 // hard evidence does not belong to the join tree, so extract from the
 2092 // targets the nodes that did not receive hard evidence
2093 NodeSet targets = set, hard_ev_nodes;
2094 for (const auto node: this->hardEvidenceNodes()) {
2095 if (targets.contains(node)) {
2096 targets.erase(node);
2097 hard_ev_nodes.insert(node);
2098 }
2099 }
2100
2101 auto& scheduler = this->scheduler();
2102
2103 // if all the nodes have received hard evidence, then compute the
2104 // joint posterior directly by multiplying the hard evidence tensors
2105 const auto& evidence = this->evidence();
2106 if (targets.empty()) {
2107 if (set.size() == 1) {
2108 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2109 } else {
2110 _ScheduleMultiDimSet_ pot_list;
2111 for (const auto node: set) {
2112 auto new_pot_ev = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[node], false);
2113 pot_list.insert(new_pot_ev);
2114 }
2115
2116 // combine all the tensors of the nodes in set
2117 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2118 const IScheduleMultiDim* pot = fast_combination.schedule(schedule, pot_list);
2119 auto schedule_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2120 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot));
2121 scheduler.execute(schedule);
2122 auto result = schedule_pot->exportMultiDim();
2123
2124 return result;
2125 }
2126 }
2127
2128
 2129 // if we still need to perform some inference task, do it: first,
 2130 // determine the clique on which we should perform collect to compute
 2131 // the unnormalized joint posterior of a set of nodes containing "targets"
2132 NodeId clique_of_set;
2133 try {
2134 clique_of_set = _joint_target_to_clique_[set];
2135 } catch (NotFound const&) {
 2136 // here, the precise set of targets is not among the joint targets
 2137 // declared by the user. So we will try to find a clique in the junction
 2138 // tree that contains "targets":
2139
2140 // 1/ we should check that all the nodes belong to the join tree
2141 for (const auto node: targets) {
 2142 if (!_graph_.exists(node)) {
 2143 GUM_ERROR(UndefinedElement,
2144 "The variable " << this->BN().variable(node).name() << "(" << node
2145 << ") does not belong to this optimized inference.")
2146 }
2147 }
2148
 2149 // 2/ the clique created by the first-eliminated node among the targets
 2150 // is the one we are looking for
2151 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2152
2153 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2154 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2155 elim_order.insert(JT_elim_order[i], (int)i);
2156 NodeId first_eliminated_node = *(targets.begin());
2157 int elim_number = elim_order[first_eliminated_node];
2158 for (const auto node: targets) {
2159 if (elim_order[node] < elim_number) {
2160 elim_number = elim_order[node];
2161 first_eliminated_node = node;
2162 }
2163 }
2164
2165 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2166
2167
 2168 // 3/ check that clique_of_set contains all the nodes of the target
2169 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2170 for (const auto node: targets) {
 2171 if (!clique_nodes.contains(node)) {
 2172 GUM_ERROR(UndefinedElement,
2173 this->BN().names(set) << "(" << set << ")"
2174 << " is not addressable in this optimized inference.")
2175 }
2176 }
2177
2178 // add the discovered clique to _joint_target_to_clique_
2179 _joint_target_to_clique_.insert(set, clique_of_set);
2180 }
2181
2182 // now perform a collect on the clique
2183 _collectMessage_(schedule, clique_of_set, clique_of_set);
2184
2185 // now we just need to create the product of the tensors of the clique
2186 // containing set with the messages received by this clique and
2187 // marginalize out all variables except set
2188 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[clique_of_set];
2189
2190 // add the messages sent by adjacent nodes to targetClique
2191 for (const auto other: _JT_->neighbours(clique_of_set))
2192 pot_list += _separator_tensors_[Arc(other, clique_of_set)];
2193
 2194 // get the set of variables that need to be removed from the tensors
2195 const NodeSet& nodes = _JT_->clique(clique_of_set);
2196 gum::VariableSet del_vars(nodes.size());
2197 gum::VariableSet kept_vars(targets.size());
2198 const auto& bn = this->BN();
2199 for (const auto node: nodes) {
2200 if (!targets.contains(node)) {
2201 del_vars.insert(&(bn.variable(node)));
2202 } else {
2203 kept_vars.insert(&(bn.variable(node)));
2204 }
2205 }
2206
2207 // pot_list now contains all the tensors to multiply and marginalize
2208 // => combine the messages
2209 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
2210 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot = nullptr;
2211 Tensor< GUM_SCALAR >* joint = nullptr;
2212
2213 if (new_pot_list.size() == 0) {
2214 joint = new Tensor< GUM_SCALAR >();
2215 for (const auto var: kept_vars)
2216 *joint << *var;
2217 } else {
2218 if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
2219 scheduler.execute(schedule);
2220 resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2221 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*new_pot_list.begin()));
2222 } else {
2223 // combine all the tensors in new_pot_list with all the hard evidence
2224 // of the nodes in set
2225 for (const auto node: hard_ev_nodes) {
2226 auto new_pot_ev = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[node], false);
2227 new_pot_list.insert(new_pot_ev);
2228 }
2229 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2230 const auto pot = fast_combination.schedule(schedule, new_pot_list);
2231 scheduler.execute(schedule);
2232 resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2233 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot));
2234 }
2235
 2236 // if resulting_pot already existed, create a copy, so that we can put
 2237 // it into the _joint_target_posteriors_ property
2238 if (pot_list.exists(resulting_pot)) {
2239 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2240 } else {
2241 joint = resulting_pot->exportMultiDim();
2242 }
2243 }
2244
2245
 2246 // check that the joint posterior is not a null vector: a null vector
 2247 // would indicate that some pieces of hard evidence are incompatible
2248 bool nonzero_found = false;
2249 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2250 if ((*joint)[inst]) {
2251 nonzero_found = true;
2252 break;
2253 }
2254 }
2255 if (!nonzero_found) {
2256 // remove joint from memory to avoid memory leaks
 2257 delete joint;
 2258 GUM_ERROR(IncompatibleEvidence,
2259 "some evidence entered into the Bayes "
2260 "net are incompatible (their joint proba = 0)");
2261 }
2262
2263 return joint;
2264 }
2265
 2266 // returns the joint a posteriori proba of a given set of nodes, without schedules
2267 template < typename GUM_SCALAR >
2268 Tensor< GUM_SCALAR >*
2269 LazyPropagation< GUM_SCALAR >::_unnormalizedJointPosterior_(const NodeSet& set) {
 2270 // hard evidence does not belong to the join tree, so extract from the
 2271 // targets the nodes that did not receive hard evidence
2272 NodeSet targets = set;
2273 NodeSet hard_ev_nodes;
2274 for (const auto node: this->hardEvidenceNodes()) {
2275 if (targets.contains(node)) {
2276 targets.erase(node);
2277 hard_ev_nodes.insert(node);
2278 }
2279 }
2280
2281 // if all the nodes have received hard evidence, then compute the
2282 // joint posterior directly by multiplying the hard evidence tensors
2283 const auto& evidence = this->evidence();
2284 if (targets.empty()) {
2285 if (set.size() == 1) {
2286 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2287 } else {
2288 _TensorSet_ pot_list;
2289 for (const auto node: set) {
2290 pot_list.insert(evidence[node]);
2291 }
2292
2293 // combine all the tensors of the nodes in set
2294 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2295 const Tensor< GUM_SCALAR >* pot = fast_combination.execute(pot_list);
2296
2297 return const_cast< Tensor< GUM_SCALAR >* >(pot);
2298 }
2299 }
2300
2301
 2302 // if we still need to perform some inference task, do it: first,
 2303 // determine the clique on which we should perform collect to compute
 2304 // the unnormalized joint posterior of a set of nodes containing "targets"
2305 NodeId clique_of_set;
2306 try {
2307 clique_of_set = _joint_target_to_clique_[set];
2308 } catch (NotFound const&) {
 2309 // here, the precise set of targets is not among the joint targets
 2310 // declared by the user. So we will try to find a clique in the junction
 2311 // tree that contains "targets":
2312
2313 // 1/ we should check that all the nodes belong to the join tree
2314 for (const auto node: targets) {
 2315 if (!_graph_.exists(node)) {
 2316 GUM_ERROR(UndefinedElement,
2317 "The variable " << this->BN().variable(node).name() << "(" << node
2318 << ") does not belong to this optimized inference.")
2319 }
2320 }
2321
 2322 // 2/ the clique created by the first-eliminated node among the targets
 2323 // is the one we are looking for
2324 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2325
2326 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2327 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2328 elim_order.insert(JT_elim_order[i], (int)i);
2329 NodeId first_eliminated_node = *(targets.begin());
2330 int elim_number = elim_order[first_eliminated_node];
2331 for (const auto node: targets) {
2332 if (elim_order[node] < elim_number) {
2333 elim_number = elim_order[node];
2334 first_eliminated_node = node;
2335 }
2336 }
2337
2338 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2339
2340
 2341 // 3/ check that clique_of_set contains all the nodes of the target
2342 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2343 for (const auto node: targets) {
 2344 if (!clique_nodes.contains(node)) {
 2345 GUM_ERROR(UndefinedElement,
2346 this->BN().names(set) << "(" << set << ")"
2347 << " is not addressable in this optimized inference.")
2348 }
2349 }
2350
2351 // add the discovered clique to _joint_target_to_clique_
2352 _joint_target_to_clique_.insert(set, clique_of_set);
2353 }
2354
2355 // now perform a collect on the clique
2356 _collectMessage_(clique_of_set, clique_of_set);
2357
2358 // now we just need to create the product of the tensors of the clique
2359 // containing set with the messages received by this clique and
2360 // marginalize out all variables except set
2361 _ScheduleMultiDimSet_ pot_list = _clique_tensors_[clique_of_set];
2362
2363 // add the messages sent by adjacent nodes to targetClique
2364 for (const auto other: _JT_->neighbours(clique_of_set))
2365 pot_list += _separator_tensors_[Arc(other, clique_of_set)];
2366
 2367 // get the set of variables that need to be removed from the tensors
2368 const NodeSet& nodes = _JT_->clique(clique_of_set);
2369 gum::VariableSet del_vars(nodes.size());
2370 gum::VariableSet kept_vars(targets.size());
2371 const auto& bn = this->BN();
2372 for (const auto node: nodes) {
2373 if (!targets.contains(node)) {
2374 del_vars.insert(&(bn.variable(node)));
2375 } else {
2376 kept_vars.insert(&(bn.variable(node)));
2377 }
2378 }
2379
2380 // pot_list now contains all the tensors to multiply and marginalize
2381 // => combine the messages
2382 _ScheduleMultiDimSet_ new_pot_list = _marginalizeOut_(pot_list, del_vars, kept_vars);
2383 Tensor< GUM_SCALAR >* joint = nullptr;
2384 if (new_pot_list.empty()) {
2385 joint = new Tensor< GUM_SCALAR >();
2386 for (const auto var: kept_vars)
2387 *joint << *var;
2388 } else {
2389 if ((new_pot_list.size() == 1) && hard_ev_nodes.empty()) {
2390 auto sched_joint = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2391 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*new_pot_list.begin()));
2392
 2393 // if sched_joint already existed, create a copy, so that we can put it
 2394 // into the _joint_target_posteriors_ property
2395 if (pot_list.exists(sched_joint)) {
2396 joint = new Tensor< GUM_SCALAR >(sched_joint->multiDim());
2397 } else {
2398 joint = sched_joint->exportMultiDim();
2399
 2400 // delete the ScheduleMultiDim wrapper now and clear new_pot_list so
 2401 // that the deletion loop below does not delete it a second time
2402 delete sched_joint;
2403 new_pot_list.clear();
2404 }
2405 } else {
2406 // combine all the tensors in new_pot_list with all the hard evidence
2407 // of the nodes in set
2408 _TensorSet_ xnew_pot_list(new_pot_list.size());
2409 for (auto xpot: new_pot_list) {
2410 xnew_pot_list.insert(
2411 &(static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(xpot)->multiDim()));
2412 }
2413 for (const auto node: hard_ev_nodes) {
2414 xnew_pot_list.insert(evidence[node]);
2415 }
2416 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2417 joint = fast_combination.execute(xnew_pot_list);
2418 }
2419 }
2420
 2421 // remove the tensors that were created in new_pot_list.
 2422 // note that, if joint was the result of _marginalizeOut_, it has been copied,
 2423 // so we can safely remove all the elements of new_pot_list
2424 for (const auto pot: new_pot_list)
2425 if (!pot_list.exists(pot)) delete pot;
2426
 2427 // check that the joint posterior is not a null vector: a null vector
 2428 // would indicate that some pieces of hard evidence are incompatible
2429 bool nonzero_found = false;
2430 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2431 if ((*joint)[inst]) {
2432 nonzero_found = true;
2433 break;
2434 }
2435 }
2436 if (!nonzero_found) {
2437 // remove joint from memory to avoid memory leaks
 2438 delete joint;
 2439 GUM_ERROR(IncompatibleEvidence,
2440 "some evidence entered into the Bayes "
2441 "net are incompatible (their joint proba = 0)");
2442 }
2443
2444 return joint;
2445 }
 2446
 2447 // returns the posterior of a given set of nodes
 2448 template < typename GUM_SCALAR >
2449 const Tensor< GUM_SCALAR >& LazyPropagation< GUM_SCALAR >::jointPosterior_(const NodeSet& set) {
2450 // check if we have already computed the posterior
2451 if (_joint_target_posteriors_.exists(set)) { return *(_joint_target_posteriors_[set]); }
2452
2453 // compute the joint posterior and normalize
2454 auto joint = unnormalizedJointPosterior_(set);
2455 joint->normalize();
2456 _joint_target_posteriors_.insert(set, joint);
2457
2458 return *joint;
2459 }
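// Editor's usage sketch for joint targets (hypothetical; it assumes the
// addJointTarget()/jointPosterior() wrappers inherited from
// JointTargetedInference and the bn/ie objects of the earlier sketches):
//
//   gum::NodeSet target{bn.idFromName("A"), bn.idFromName("B")};
//   ie.addJointTarget(target);
//   ie.makeInference();
//   const auto& pAB = ie.jointPosterior(target);   // normalized P(A, B | e)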
 2460
 2461 // returns the posterior of a wanted target that is a subset of a declared joint target
 2462 template < typename GUM_SCALAR >
2463 const Tensor< GUM_SCALAR >&
2464 LazyPropagation< GUM_SCALAR >::jointPosterior_(const NodeSet& wanted_target,
2465 const NodeSet& declared_target) {
2466 // check if we have already computed the posterior of wanted_target
2467 if (_joint_target_posteriors_.exists(wanted_target))
2468 return *(_joint_target_posteriors_[wanted_target]);
2469
2470 // here, we will have to compute the posterior of declared_target and
2471 // marginalize out all the variables that do not belong to wanted_target
2472
2473 // check if we have already computed the posterior of declared_target
2474 if (!_joint_target_posteriors_.exists(declared_target)) { jointPosterior_(declared_target); }
2475
2476 // marginalize out all the variables that do not belong to wanted_target
2477 const auto& bn = this->BN();
2478 gum::VariableSet del_vars;
2479 for (const auto node: declared_target)
2480 if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
2481 auto pot
2482 = new Tensor< GUM_SCALAR >(_joint_target_posteriors_[declared_target]->sumOut(del_vars));
2483
2484 // save the result into the cache
2485 _joint_target_posteriors_.insert(wanted_target, pot);
2486
2487 return *pot;
2488 }
2489
2490 template < typename GUM_SCALAR >
2491 GUM_SCALAR LazyPropagation< GUM_SCALAR >::evidenceProbability() {
2492 // here, we should check that _find_relevant_tensor_type_ is equal to
2493 // FIND_ALL. Otherwise, the computations could be wrong.
2494 RelevantTensorsFinderType old_relevant_type = _find_relevant_tensor_type_;
2495
2496 // if the relevant tensors finder is not equal to FIND_ALL, all the
2497 // current computations may lead to incorrect results, so we shall
2498 // discard them
2499 if (old_relevant_type != RelevantTensorsFinderType::FIND_ALL) {
2500 _find_relevant_tensor_type_ = RelevantTensorsFinderType::FIND_ALL;
2501 _is_new_jt_needed_ = true;
2502 this->setOutdatedStructureState_();
2503 }
2504
2505 // perform inference in each connected component
2506 this->makeInference();
2507
 2508 // for each connected component, select a variable X and compute the
 2509 // joint probability of X and the evidence e. Then marginalize out X to
 2510 // get p(e) in this connected component. Finally, multiply all the p(e)
 2511 // we got together with the elements in _constants_. The result is the
 2512 // probability of the evidence
2513
2514 GUM_SCALAR prob_ev = 1;
2515 for (const auto root: _roots_) {
2516 // get a node in the clique
2517 NodeId node = *(_JT_->clique(root).begin());
2518 Tensor< GUM_SCALAR >* tmp = unnormalizedJointPosterior_(node);
2519 prob_ev *= tmp->sum();
2520 delete tmp;
2521 }
2522
2523 for (const auto& projected_cpt: _constants_)
2524 prob_ev *= projected_cpt.second;
2525
2526 // put back the relevant tensor type selected by the user
2527 _find_relevant_tensor_type_ = old_relevant_type;
2528
2529 return prob_ev;
2530 }
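// Editor's note: the loop above relies on the identity
//
//   sum_X P(X, e_c) = P(e_c)
//
// for any node X of a connected component c, where e_c is the evidence
// entered in that component; the product over all components, times the
// projection constants stored in _constants_, then yields P(e).
// Hypothetical usage, continuing the earlier sketches:
//
//   ie.addEvidence(bn.idFromName("X"), 0);
//   double p_e = ie.evidenceProbability();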
2531
2532 template < typename GUM_SCALAR >
2533 Instantiation LazyPropagation< GUM_SCALAR >::mpe() {
2534 // here, we should check that _find_relevant_tensor_type_ is equal to
2535 // FIND_ALL. Otherwise, the computations could be wrong.
2536 RelevantTensorsFinderType old_relevant_type = _find_relevant_tensor_type_;
2537
2538 // if the relevant tensors finder is not equal to FIND_ALL, all the
2539 // current computations may lead to incorrect results, so we shall
2540 // discard them
2541 if (old_relevant_type != RelevantTensorsFinderType::FIND_ALL) {
2542 _find_relevant_tensor_type_ = RelevantTensorsFinderType::FIND_ALL;
2543 _is_new_jt_needed_ = true;
2544 this->setOutdatedStructureState_();
2545 }
2546
2547 // here, we should enforce that the projections are max operators
2548 auto old_projection_op = _projection_op_;
2549 auto new_projection_op = LPMaxprojTensor< GUM_SCALAR >;
2550 bool projection_op_changed = old_projection_op != new_projection_op;
2551 if (projection_op_changed) { this->_setProjectionFunction_(new_projection_op); }
2552
 2553 // make all nodes targets and remove all the target sets
2554 const auto in_target_mode = this->isInTargetMode();
2555 NodeSet old_targets;
2556 Set< NodeSet > old_joint_targets;
2557 if (in_target_mode) {
2558 old_targets = this->targets();
2559 old_joint_targets = this->jointTargets();
2560 this->eraseAllTargets();
2561 }
2562
2563 // perform inference in each connected component
2564 this->makeInference();
2565
2566 // keep track of the hard evidence
2567 Instantiation instantiations;
2568 for (const auto& ev: this->hardEvidence()) {
2569 const auto& variable = this->BN().variable(ev.first);
2570 instantiations.add(variable);
2571 instantiations.chgVal(variable, ev.second);
2572 }
2573
2574 // for each clique, get its argmax
2575 NodeProperty< bool > clique2marked = _JT_->nodesPropertyFromVal(false);
2576 std::function< void(NodeId, NodeId) > diffuse_marks =
2577 [&clique2marked, &diffuse_marks, &instantiations, this](NodeId clique, NodeId clique_from) {
2578 clique2marked[clique] = true;
2579
2580 // compute the joint of the clique tensor and the messages that
2581 // were sent by all its neighbors. Then extract only the variables
2582 // that have no value yet
2583 auto clique_nodes = _JT_->clique(clique);
2584 auto pot = unnormalizedJointPosterior_(clique_nodes);
2585 auto pot_argmax = pot->extract(instantiations).argmax();
2586 delete pot;
2587 const auto& new_instantiation = *(pot_argmax.first.begin());
2588
2589 // update the instantiation of the MPE variables
2590 for (const auto node: clique_nodes) {
2591 const auto& variable = this->BN().variable(node);
2592 if (!instantiations.contains(variable)) {
2593 instantiations.add(variable);
2594 instantiations.chgVal(variable, new_instantiation.val(variable));
2595 }
2596 }
2597
2598 // go on with the diffusion on this connected component
2599 for (const auto neigh: _JT_->neighbours(clique))
2600 if ((neigh != clique_from) && !clique2marked[neigh]) diffuse_marks(neigh, clique);
2601 };
2602
2603 // here we compute the values of the variables corresponding to MPE on every
2604 // connected component
2605 for (const auto& cliqueProp: clique2marked) {
2606 const auto clique = cliqueProp.first;
2607 if (!clique2marked[clique]) diffuse_marks(clique, clique);
2608 }
2609
2610 // put back the relevant tensor type selected by the user as well as the
2611 // projection operator and the targets selected by the user
2612 _find_relevant_tensor_type_ = old_relevant_type;
2613
2614 // if we changed the projection operator, put back the old one
2615 if (projection_op_changed) { this->_setProjectionFunction_(old_projection_op); }
2616
2617 // if, prior to the MPE request, we had targets, get them back
2618 if (in_target_mode) {
2619 for (const auto node: old_targets) {
2620 this->addTarget(node);
2621 }
2622 for (const auto& set: old_joint_targets) {
2623 this->addJointTarget(set);
2624 }
2625 }
2626
 2627 // return the MPE instantiation
2628 return instantiations;
2629 }
2630
2631 template < typename GUM_SCALAR >
2632 std::pair< Instantiation, GUM_SCALAR > LazyPropagation< GUM_SCALAR >::mpeLog2Posterior() {
2633 // get the instantiation of the variables w.r.t. MPE
2634 const auto instantiation = mpe();
2635
2636 // here, we have the instantiation of all the variables. Now, we have to
2637 // compute the posterior probability of this instantiation. To do so,
2638 // we will extract from the CPTs the probas of the nodes, get their log2
2639 // and add them (in order to get the log2 of the joint). We do this
2640 // for all the nodes except those that received soft evidence: for them,
2641 // we need to multiply their conditional probability given the values of
2642 // their parents by their evidence vector.
2643 auto proba = (GUM_SCALAR)0.0;
2644 auto node_proba = (GUM_SCALAR)0.0;
2645
2646 for (const auto node: this->BN().dag()) {
2647 const auto& cpt = this->BN().cpt(node);
2648 if (!this->hasSoftEvidence(node)) {
2649 node_proba = cpt[instantiation];
2650 } else {
2651 const auto& ev = *(this->evidence()[node]);
2652 node_proba = cpt[instantiation] * ev[instantiation];
2653 }
2654
2655 if (node_proba == (GUM_SCALAR)0)
2656 return {instantiation, std::numeric_limits< GUM_SCALAR >::lowest()};
2657 proba += (GUM_SCALAR)std::log2(node_proba);
2658 }
2659
2660 if (!this->hasEvidence()) return {instantiation, proba};
2661 else return {instantiation, proba - std::log2(this->evidenceProbability())};
2662 }
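// Editor's usage sketch (hypothetical): retrieving the most probable
// explanation together with its log2-posterior:
//
//   auto [argmax, log2p] = ie.mpeLog2Posterior();
//   // argmax holds one value per variable of the network; log2p is the
//   // log2 of the posterior probability of that instantiation given the
//   // evidence (numeric_limits::lowest() when the instantiation has
//   // probability 0)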
2663
2664} /* namespace gum */
2665
2666#endif // DOXYGEN_SHOULD_SKIP_THIS
The BayesBall algorithm (as described by Shachter).
Detect barren nodes for inference in Bayesian networks.
An algorithm for converting a join tree into a binary join tree.
Exception : a similar element already exists.
<agrum/BN/inference/evidenceInference.h>
Exception : fatal (unknown ?) error.
Class representing the minimal interface for Bayesian network with no numerical data.
Definition IBayesNet.h:75
Exception : several evidence are incompatible together (proba=0).
Exception : at least one argument passed to a function is not what was expected.
<agrum/BN/inference/jointTargetedInference.h>
LazyPropagation(const IBayesNet< GUM_SCALAR > *BN, RelevantTensorsFinderType=RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS, FindBarrenNodesType=FindBarrenNodesType::FIND_BARREN_NODES, bool use_binary_join_tree=true)
default constructor
Exception : the element we looked for cannot be found.
Size size() const noexcept
Returns the number of elements in the set.
Definition set_tpl.h:636
iterator_safe beginSafe() const
The usual safe begin iterator to parse the set.
Definition set_tpl.h:414
const iterator_safe & endSafe() const noexcept
The usual safe end iterator to parse the set.
Definition set_tpl.h:426
bool exists(const Key &k) const
Indicates whether a given elements belong to the set.
Definition set_tpl.h:533
void insert(const Key &k)
Inserts a new element into the set.
Definition set_tpl.h:539
void erase(const Key &k)
Erases an element from the set.
Definition set_tpl.h:582
Exception : a looked-for element could not be found.
d-separation analysis (as described in Koller & Friedman 2009)
#define GUM_ERROR(type, msg)
Definition exceptions.h:72
Set< NodeId > NodeSet
Some typedefs and defines for shortcuts ...
Header files of gum::Instantiation.
gum is the global namespace for all aGrUM entities
Definition agrum.h:46
FindBarrenNodesType
type of algorithm to determine barren nodes
Set< const DiscreteVariable * > VariableSet
CliqueGraph JoinTree
a join tree is a clique graph satisfying the running intersection property (but some cliques may be i...
CliqueGraph JunctionTree
a junction tree is a clique graph satisfying the running intersection property and such that no cliqu...
RelevantTensorsFinderType
type of algorithm for determining the relevant tensors for combinations using some d-separation analy...