aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
ShaferShenoyInference_tpl.h
Go to the documentation of this file.
1/****************************************************************************
2 * This file is part of the aGrUM/pyAgrum library. *
3 * *
4 * Copyright (c) 2005-2025 by *
5 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
6 * - Christophe GONZALES(_at_AMU) *
7 * *
8 * The aGrUM/pyAgrum library is free software; you can redistribute it *
9 * and/or modify it under the terms of either : *
10 * *
11 * - the GNU Lesser General Public License as published by *
12 * the Free Software Foundation, either version 3 of the License, *
13 * or (at your option) any later version, *
14 * - the MIT license (MIT), *
15 * - or both in dual license, as here. *
16 * *
17 * (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) *
18 * *
19 * This aGrUM/pyAgrum library is distributed in the hope that it will be *
20 * useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
21 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS *
22 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR *
26 * OTHER DEALINGS IN THE SOFTWARE. *
27 * *
28 * See LICENCES for more details. *
29 * *
30 * SPDX-FileCopyrightText: Copyright 2005-2025 *
31 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
32 * - Christophe GONZALES(_at_AMU) *
33 * SPDX-License-Identifier: LGPL-3.0-or-later OR MIT *
34 * *
35 * Contact : info_at_agrum_dot_org *
36 * homepage : http://agrum.gitlab.io *
37 * gitlab : https://gitlab.com/agrumery/agrum *
38 * *
39 ****************************************************************************/
40#pragma once
41
42
50
51#ifndef DOXYGEN_SHOULD_SKIP_THIS
52# include <algorithm>
53
61
62namespace gum {
63 // default constructor
64 template < typename GUM_SCALAR >
67 RelevantTensorsFinderType relevant_type,
68 FindBarrenNodesType barren_type,
69 bool use_binary_join_tree) :
70 JointTargetedInference< GUM_SCALAR >(BN), EvidenceInference< GUM_SCALAR >(BN),
71 _use_binary_join_tree_(use_binary_join_tree) {
72 // sets the relevant tensor and the barren nodes finding algorithm
73 _findRelevantTensors_
74 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
75 setRelevantTensorsFinderType(relevant_type);
76 setFindBarrenNodesType(barren_type);
77
78 // create a default triangulation (the user can change it afterwards)
79 _triangulation_ = new DefaultTriangulation;
80
81 // for debugging purposes
82 GUM_CONSTRUCTOR(ShaferShenoyInference);
83 }
84
85 // destructor
86 template < typename GUM_SCALAR >
87 INLINE ShaferShenoyInference< GUM_SCALAR >::~ShaferShenoyInference() {
88 // remove all the tensors created during the last message passing
89 for (const auto& pot: _arc_to_created_tensors_)
90 delete pot.second;
91
92 // remove all the tensors in _clique_ss_tensor_ that do not belong
93 // to _clique_tensors_ : in this case, those tensors have been
94 // created by combination of the corresponding list of tensors in
95 // _clique_tensors_. In other words, the size of this list is strictly
96 // greater than 1.
97 for (auto pot: _clique_ss_tensor_) {
98 if (_clique_tensors_[pot.first].size() > 1) delete pot.second;
99 }
100
101 for (auto potset: _clique_tensors_) {
102 for (auto pot: potset.second)
103 delete pot;
104 }
105
106 // remove all the posteriors computed
107 for (const auto& pot: _target_posteriors_)
108 delete pot.second;
109 for (const auto& pot: _joint_target_posteriors_)
110 delete pot.second;
111
112 // remove the junction tree and the triangulation algorithm
113 if (_JT_ != nullptr) delete _JT_;
114 if (_junctionTree_ != nullptr) delete _junctionTree_;
115 delete _triangulation_;
116
117 // for debugging purposes
118 GUM_DESTRUCTOR(ShaferShenoyInference);
119 }
120
122 template < typename GUM_SCALAR >
123 void ShaferShenoyInference< GUM_SCALAR >::setTriangulation(
124 const Triangulation& new_triangulation) {
125 delete _triangulation_;
126 _triangulation_ = new_triangulation.newFactory();
127 _is_new_jt_needed_ = true;
128 this->setOutdatedStructureState_();
129 }
130
132 template < typename GUM_SCALAR >
133 INLINE const JoinTree* ShaferShenoyInference< GUM_SCALAR >::joinTree() {
134 if (_is_new_jt_needed_) _createNewJT_();
135
136 return _JT_;
137 }
138
140 template < typename GUM_SCALAR >
141 INLINE const JunctionTree* ShaferShenoyInference< GUM_SCALAR >::junctionTree() {
142 if (_is_new_jt_needed_) _createNewJT_();
143
144 return _junctionTree_;
145 }
146
148 template < typename GUM_SCALAR >
149 void ShaferShenoyInference< GUM_SCALAR >::setRelevantTensorsFinderType(
150 RelevantTensorsFinderType type) {
151 if (type != _find_relevant_tensor_type_) {
152 switch (type) {
153 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
154 _findRelevantTensors_
155 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
156 break;
157
158 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
159 _findRelevantTensors_
160 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_;
161 break;
162
163 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
164 _findRelevantTensors_
165 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_;
166 break;
167
168 case RelevantTensorsFinderType::FIND_ALL :
169 _findRelevantTensors_ = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsGetAll_;
170 break;
171
172 default :
174 "setRelevantTensorsFinderType for type " << (unsigned int)type
175 << " is not implemented yet");
176 }
177
178 _find_relevant_tensor_type_ = type;
179
180 // indicate that all messages need be reconstructed to take into account
181 // the change in d-separation analysis
182 _invalidateAllMessages_();
183 }
184 }
185
187 template < typename GUM_SCALAR >
188 INLINE void ShaferShenoyInference< GUM_SCALAR >::_setProjectionFunction_(
189 Tensor< GUM_SCALAR > (*proj)(const Tensor< GUM_SCALAR >&, const gum::VariableSet&)) {
190 _projection_op_ = proj;
191
192 // indicate that all messages need be reconstructed to take into account
193 // the change in of the projection operator
194 _invalidateAllMessages_();
195 }
196
198 template < typename GUM_SCALAR >
199 INLINE void ShaferShenoyInference< GUM_SCALAR >::_setCombinationFunction_(
200 Tensor< GUM_SCALAR > (*comb)(const Tensor< GUM_SCALAR >&, const Tensor< GUM_SCALAR >&)) {
201 _combination_op_ = comb;
202
203 // indicate that all messages need be reconstructed to take into account
204 // the change of the combination operator
205 _invalidateAllMessages_();
206 }
207
209 template < typename GUM_SCALAR >
210 void ShaferShenoyInference< GUM_SCALAR >::_invalidateAllMessages_() {
211 // remove all the messages computed
212 for (auto& pot: _separator_tensors_)
213 pot.second = nullptr;
214
215 for (auto& mess_computed: _messages_computed_)
216 mess_computed.second = false;
217
218 // remove all the created tensors kept on the arcs
219 for (const auto& pot: _arc_to_created_tensors_)
220 if (pot.second != nullptr) delete pot.second;
221 _arc_to_created_tensors_.clear();
222
223 // remove all the posteriors
224 for (const auto& pot: _target_posteriors_)
225 delete pot.second;
226 _target_posteriors_.clear();
227 for (const auto& pot: _joint_target_posteriors_)
228 delete pot.second;
229 _joint_target_posteriors_.clear();
230
231 // indicate that new messages need be computed
232 if (this->isInferenceReady() || this->isInferenceDone()) this->setOutdatedTensorsState_();
233 }
234
236 template < typename GUM_SCALAR >
237 void ShaferShenoyInference< GUM_SCALAR >::setFindBarrenNodesType(FindBarrenNodesType type) {
238 if (type != _barren_nodes_type_) {
239 // WARNING: if a new type is added here, method _createJT_ should certainly
240 // be updated as well, in particular its step 2.
241 switch (type) {
242 case FindBarrenNodesType::FIND_BARREN_NODES :
243 case FindBarrenNodesType::FIND_NO_BARREN_NODES : break;
244
245 default :
247 "setFindBarrenNodesType for type " << (unsigned int)type
248 << " is not implemented yet");
249 }
250
251 _barren_nodes_type_ = type;
252
253 // tensorly, we may need to reconstruct a junction tree
254 this->setOutdatedStructureState_();
255 }
256 }
257
259 template < typename GUM_SCALAR >
260 INLINE void ShaferShenoyInference< GUM_SCALAR >::onEvidenceAdded_(const NodeId id,
261 bool isHardEvidence) {
262 // if we have a new hard evidence, this modifies the undigraph over which
263 // the join tree is created. This is also the case if id is not a node of
264 // of the undigraph
265 if (isHardEvidence || !_graph_.exists(id)) _is_new_jt_needed_ = true;
266 else {
267 try {
268 _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ADDED);
269 } catch (DuplicateElement const&) {
270 // here, the evidence change already existed. This necessarily means
271 // that the current saved change is an EVIDENCE_ERASED. So if we
272 // erased the evidence and added some again, this corresponds to an
273 // EVIDENCE_MODIFIED
274 _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
275 }
276 }
277 }
278
280 template < typename GUM_SCALAR >
281 INLINE void ShaferShenoyInference< GUM_SCALAR >::onEvidenceErased_(const NodeId id,
282 bool isHardEvidence) {
283 // if we delete a hard evidence, this modifies the undigraph over which
284 // the join tree is created.
285 if (isHardEvidence) _is_new_jt_needed_ = true;
286 else {
287 try {
288 _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_ERASED);
289 } catch (DuplicateElement const&) {
290 // here, the evidence change already existed and it is necessarily an
291 // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
292 // been added and is now erased, this is similar to not having created
293 // it. If the evidence was only modified, it already existed in the
294 // last inference and we should now indicate that it has been removed.
295 if (_evidence_changes_[id] == EvidenceChangeType::EVIDENCE_ADDED)
296 _evidence_changes_.erase(id);
297 else _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_ERASED;
298 }
299 }
300 }
301
303 template < typename GUM_SCALAR >
304 void ShaferShenoyInference< GUM_SCALAR >::onAllEvidenceErased_(bool has_hard_evidence) {
305 if (has_hard_evidence || !this->hardEvidenceNodes().empty()) _is_new_jt_needed_ = true;
306 else {
307 for (const auto node: this->softEvidenceNodes()) {
308 try {
309 _evidence_changes_.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
310 } catch (DuplicateElement const&) {
311 // here, the evidence change already existed and it is necessarily an
312 // EVIDENCE_ADDED or an EVIDENCE_MODIFIED. So, if the evidence has
313 // been added and is now erased, this is similar to not having created
314 // it. If the evidence was only modified, it already existed in the
315 // last inference and we should now indicate that it has been removed.
316 if (_evidence_changes_[node] == EvidenceChangeType::EVIDENCE_ADDED)
317 _evidence_changes_.erase(node);
318 else _evidence_changes_[node] = EvidenceChangeType::EVIDENCE_ERASED;
319 }
320 }
321 }
322 }
323
325 template < typename GUM_SCALAR >
326 INLINE void ShaferShenoyInference< GUM_SCALAR >::onEvidenceChanged_(const NodeId id,
327 bool hasChangedSoftHard) {
328 if (hasChangedSoftHard) _is_new_jt_needed_ = true;
329 else {
330 try {
331 _evidence_changes_.insert(id, EvidenceChangeType::EVIDENCE_MODIFIED);
332 } catch (DuplicateElement const&) {
333 // here, the evidence change already existed and it is necessarily an
334 // EVIDENCE_ADDED. So we should keep this state to indicate that this
335 // evidence is new w.r.t. the last inference
336 }
337 }
338 }
339
341 template < typename GUM_SCALAR >
342 INLINE void ShaferShenoyInference< GUM_SCALAR >::onModelChanged_(const GraphicalModel* bn) {}
343
345 template < typename GUM_SCALAR >
346 INLINE void ShaferShenoyInference< GUM_SCALAR >::onMarginalTargetAdded_(const NodeId id) {
347 // if the graph does not contain the node, either this is due to the fact that
348 // the node has received a hard evidence or because it was d-separated from the
349 // target nodes during the last inference. In the latter case, we should change
350 // the graph, hence we should recompute the JT
351 if (!_graph_.exists(id) && !_hard_ev_nodes_.contains(id)) { _is_new_jt_needed_ = true; }
352 }
353
355 template < typename GUM_SCALAR >
356 INLINE void ShaferShenoyInference< GUM_SCALAR >::onMarginalTargetErased_(const NodeId id) {}
357
359 template < typename GUM_SCALAR >
360 INLINE void ShaferShenoyInference< GUM_SCALAR >::onJointTargetAdded_(const NodeSet& set) {
361 // if there is no current joint tree, obviously, we need one.
362 if (_JT_ == nullptr) {
363 _is_new_jt_needed_ = true;
364 return;
365 }
366
367 // here, we will remove from set the nodes that received hard evidence and try
368 // to find a clique in the current _JT_ that can contain the resulting set. If
369 // we cannot find one, we must recompute the _JT_
370 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
371 int elim_number = std::numeric_limits< int >::max();
372 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
373 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
374 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
375 elim_order.insert(JT_elim_order[i], (int)i);
376 NodeSet unobserved_set(set.size());
377 for (const auto node: set) {
378 if (!_graph_.exists(node)) {
379 if (!_hard_ev_nodes_.contains(node)) {
380 _is_new_jt_needed_ = true;
381 return;
382 }
383 } else {
384 unobserved_set.insert(node);
385 if (elim_order[node] < elim_number) {
386 elim_number = elim_order[node];
387 first_eliminated_node = node;
388 }
389 }
390 }
391
392 if (!unobserved_set.empty()) {
393 // here, first_eliminated_node contains the first var (node or one of its
394 // parents) eliminated => the clique created during its elimination
395 // should contain all the nodes in unobserved_set
396 const auto clique_id = _node_to_clique_[first_eliminated_node];
397 const auto& clique = _JT_->clique(clique_id);
398 for (const auto node: unobserved_set) {
399 if (!clique.contains(node)) {
400 _is_new_jt_needed_ = true;
401 return;
402 }
403 }
404 }
405 }
406
408 template < typename GUM_SCALAR >
409 INLINE void ShaferShenoyInference< GUM_SCALAR >::onJointTargetErased_(const NodeSet& set) {}
410
412 template < typename GUM_SCALAR >
413 INLINE void ShaferShenoyInference< GUM_SCALAR >::onAllMarginalTargetsAdded_() {
414 for (const auto node: this->BN().dag()) {
415 // if the graph does not contain the node, either this is due to the fact
416 // that the node has received a hard evidence or because it was d-separated
417 // from the target nodes during the last inference. In the latter case, we
418 // should change the graph, hence we should recompute the JT
419 if (!_graph_.exists(node) && !_hard_ev_nodes_.contains(node)) {
420 _is_new_jt_needed_ = true;
421 return;
422 }
423 }
424 }
425
427 template < typename GUM_SCALAR >
428 INLINE void ShaferShenoyInference< GUM_SCALAR >::onAllMarginalTargetsErased_() {}
429
431 template < typename GUM_SCALAR >
432 INLINE void ShaferShenoyInference< GUM_SCALAR >::onAllJointTargetsErased_() {}
433
435 template < typename GUM_SCALAR >
436 INLINE void ShaferShenoyInference< GUM_SCALAR >::onAllTargetsErased_() {}
437
438 // check whether a new junction tree is really needed for the next inference
439 template < typename GUM_SCALAR >
440 bool ShaferShenoyInference< GUM_SCALAR >::_isNewJTNeeded_() const {
441 // if we do not have a JT or if _new_jt_needed_ is set to true, then
442 // we know that we need to create a new join tree
443 if ((_JT_ == nullptr) || _is_new_jt_needed_) return true;
444
445 // if some targets do not belong to the join tree and, consequently, to the
446 // undirected graph that was used to construct the join tree, then we need
447 // to create a new JT. This situation may occur if we constructed the
448 // join tree after pruning irrelevant/barren nodes from the BN.
449 // However, note that the nodes that received hard evidence do not belong to
450 // the graph and, therefore, should not be taken into account
451 const auto& hard_ev_nodes = this->hardEvidenceNodes();
452 for (const auto node: this->targets()) {
453 if (!_graph_.exists(node) && !hard_ev_nodes.exists(node)) return true;
454 }
455
456 // now, do the same for the joint targets
457 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
458 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
459 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
460 elim_order.insert(JT_elim_order[i], (int)i);
461 NodeSet unobserved_set;
462
463 for (const auto& joint_target: this->jointTargets()) {
464 // here, we need to check that at least one clique contains all the
465 // nodes of the joint target.
466 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
467 int elim_number = std::numeric_limits< int >::max();
468 unobserved_set.clear();
469 for (const auto node: joint_target) {
470 if (!_graph_.exists(node)) {
471 if (!hard_ev_nodes.exists(node)) return true;
472 } else {
473 unobserved_set.insert(node);
474 if (elim_order[node] < elim_number) {
475 elim_number = elim_order[node];
476 first_eliminated_node = node;
477 }
478 }
479 }
480 if (!unobserved_set.empty()) {
481 // here, first_eliminated_node contains the first var (node or one of its
482 // parents) eliminated => the clique created during its elimination
483 // should contain all the nodes in unobserved_set
484 const auto clique_id = _node_to_clique_[first_eliminated_node];
485 const auto& clique = _JT_->clique(clique_id);
486 for (const auto node: unobserved_set) {
487 if (!clique.contains(node)) return true;
488 }
489 }
490 }
491
492 // if some new evidence have been added on nodes that do not belong
493 // to _graph_, then we tensorly have to reconstruct the join tree
494 for (const auto& change: _evidence_changes_) {
495 if ((change.second == EvidenceChangeType::EVIDENCE_ADDED) && !_graph_.exists(change.first))
496 return true;
497 }
498
499 // here, the current JT is exactly what we need for the next inference
500 return false;
501 }
502
504 template < typename GUM_SCALAR >
505 void ShaferShenoyInference< GUM_SCALAR >::_createNewJT_() {
506 // to create the JT, we first create the moral graph of the BN in the
507 // following way in order to take into account the barren nodes and the
508 // nodes that received evidence:
509 // 1/ we create an undirected graph containing only the nodes and no edge
510 // 2/ if we take into account barren nodes, remove them from the graph
511 // 3/ if we take d-separation into account, remove the d-separated nodes
512 // 4/ add edges so that each node and its parents in the BN form a clique
513 // 5/ add edges so that joint targets form a clique of the moral graph
514 // 6/ remove the nodes that received hard evidence (by step 4/, their
515 // parents are linked by edges, which is necessary for inference)
516 //
517 // At the end of step 6/, we have our moral graph and we can triangulate it
518 // to get the new junction tree
519
520 // 1/ create an undirected graph containing only the nodes and no edge
521 const auto& bn = this->BN();
522 _graph_.clear();
523 for (const auto node: bn.dag())
524 _graph_.addNodeWithId(node);
525
526 // identify the target nodes
527 NodeSet target_nodes = this->targets();
528 for (const auto& nodeset: this->jointTargets()) {
529 target_nodes += nodeset;
530 }
531
532 // 2/ if we wish to exploit barren nodes, we shall remove them from the
533 // BN. To do so: we identify all the nodes that are not targets and have
534 // received no evidence and such that their descendants are neither
535 // targets nor evidence nodes. Such nodes can be safely discarded from
536 // the BN without altering the inference output
537 if ((this->nbrTargets() != bn.dag().size())
538 && (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES)) {
539 // check that all the nodes are not targets, otherwise, there is no
540 // barren node
541 if (target_nodes.size() != bn.size()) {
542 BarrenNodesFinder finder(&(bn.dag()));
543 finder.setTargets(&target_nodes);
544
545 NodeSet evidence_nodes(this->evidence().size());
546 for (const auto& pair: this->evidence()) {
547 evidence_nodes.insert(pair.first);
548 }
549 finder.setEvidence(&evidence_nodes);
550
551 NodeSet barren_nodes = finder.barrenNodes();
552
553 // remove the barren nodes from the moral graph
554 for (const auto node: barren_nodes) {
555 _graph_.eraseNode(node);
556 }
557 }
558 }
559
560 // 3/ if we wish to exploit d-separation, remove all the nodes that are
561 // d-separated from our targets. Of course, if all the nodes are targets,
562 // no need to perform a d-separation analysis
563 if (this->nbrTargets() != bn.dag().size()) {
564 NodeSet requisite_nodes;
565 bool dsep_analysis = false;
566 switch (_find_relevant_tensor_type_) {
567 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
568 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES : {
569 BayesBall::requisiteNodes(bn.dag(),
570 target_nodes,
571 this->hardEvidenceNodes(),
572 this->softEvidenceNodes(),
573 requisite_nodes);
574 dsep_analysis = true;
575 } break;
576
577 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 : {
578 dSeparationAlgorithm dsep;
579 dsep.requisiteNodes(bn.dag(),
580 target_nodes,
581 this->hardEvidenceNodes(),
582 this->softEvidenceNodes(),
583 requisite_nodes);
584 dsep_analysis = true;
585 } break;
586
587 case RelevantTensorsFinderType::FIND_ALL : break;
588
589 default : GUM_ERROR(FatalError, "not implemented yet")
590 }
591
592 // remove all the nodes that are not requisite
593 if (dsep_analysis) {
594 for (auto iter = _graph_.beginSafe(); iter != _graph_.endSafe(); ++iter) {
595 if (!requisite_nodes.contains(*iter) && !this->hardEvidenceNodes().contains(*iter)) {
596 _graph_.eraseNode(*iter);
597 }
598 }
599 }
600 }
601
602 // 4/ add edges so that each node and its parents in the BN form a clique
603 for (const auto node: _graph_) {
604 const NodeSet& parents = bn.parents(node);
605 for (auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
606 // before adding an edge between node and its parent, check that the
607 // parent belongs to the graph. Actually, when d-separated nodes are
608 // removed, it may be the case that the parents of hard evidence nodes
609 // are removed. But the latter still exist in the graph.
610 if (_graph_.existsNode(*iter1)) {
611 _graph_.addEdge(*iter1, node);
612
613 auto iter2 = iter1;
614 for (++iter2; iter2 != parents.cend(); ++iter2) {
615 // before adding an edge, check that both extremities belong to
616 // the graph. Actually, when d-separated nodes are removed, it may
617 // be the case that the parents of hard evidence nodes are removed.
618 // But the latter still exist in the graph.
619 if (_graph_.existsNode(*iter2)) _graph_.addEdge(*iter1, *iter2);
620 }
621 }
622 }
623 }
624
625 // 5/ if there exist some joint targets, we shall add new edges
626 // into the moral graph in order to ensure that there exists a clique
627 // containing each joint
628 for (const auto& nodeset: this->jointTargets()) {
629 for (auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
630 auto iter2 = iter1;
631 for (++iter2; iter2 != nodeset.cend(); ++iter2) {
632 _graph_.addEdge(*iter1, *iter2);
633 }
634 }
635 }
636
637 // 6/ remove all the nodes that received hard evidence
638 _hard_ev_nodes_ = this->hardEvidenceNodes();
639 for (const auto node: _hard_ev_nodes_) {
640 _graph_.eraseNode(node);
641 }
642
643
644 // now, we can compute the new junction tree. To speed-up computations
645 // (essentially, those of a distribution phase), we construct from this
646 // junction tree a binary join tree
647 if (_JT_ != nullptr) delete _JT_;
648 if (_junctionTree_ != nullptr) delete _junctionTree_;
649
650 _triangulation_->setGraph(&_graph_, &(this->domainSizes()));
651 const JunctionTree& triang_jt = _triangulation_->junctionTree();
652 if (_use_binary_join_tree_) {
653 BinaryJoinTreeConverterDefault bjt_converter;
654 NodeSet emptyset;
655 _JT_ = new CliqueGraph(bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
656 } else {
657 _JT_ = new CliqueGraph(triang_jt);
658 }
659 _junctionTree_ = new CliqueGraph(triang_jt);
660
661
662 // indicate, for each node of the moral graph, a clique in _JT_ that can
663 // contain its conditional probability table
664 _node_to_clique_.clear();
665 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
666 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
667 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
668 elim_order.insert(JT_elim_order[i], (int)i);
669 const DAG& dag = bn.dag();
670 for (const auto node: _graph_) {
671 // get the variables in the tensor of node (and its parents)
672 NodeId first_eliminated_node = node;
673 int elim_number = elim_order[first_eliminated_node];
674
675 for (const auto parent: dag.parents(node)) {
676 if (_graph_.existsNode(parent) && (elim_order[parent] < elim_number)) {
677 elim_number = elim_order[parent];
678 first_eliminated_node = parent;
679 }
680 }
681
682 // first_eliminated_node contains the first var (node or one of its
683 // parents) eliminated => the clique created during its elimination
684 // contains node and all of its parents => it can contain the tensor
685 // assigned to the node in the BN
686 _node_to_clique_.insert(node,
687 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
688 }
689
690 // do the same for the nodes that received hard evidence. Here, we only store
691 // the nodes for which at least one parent belongs to _graph_ (otherwise
692 // their CPT is just a constant real number).
693 for (const auto node: _hard_ev_nodes_) {
694 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
695 int elim_number = std::numeric_limits< int >::max();
696
697 for (const auto parent: dag.parents(node)) {
698 if (_graph_.exists(parent) && (elim_order[parent] < elim_number)) {
699 elim_number = elim_order[parent];
700 first_eliminated_node = parent;
701 }
702 }
703
704 // first_eliminated_node contains the first var (node or one of its
705 // parents) eliminated => the clique created during its elimination
706 // contains node and all of its parents => it can contain the tensor
707 // assigned to the node in the BN
708 if (elim_number != std::numeric_limits< int >::max()) {
709 _node_to_clique_.insert(node,
710 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
711 }
712 }
713
714 // indicate for each joint_target a clique that contains it
715 _joint_target_to_clique_.clear();
716 for (const auto& set: this->jointTargets()) {
717 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
718 int elim_number = std::numeric_limits< int >::max();
719
720 // do not take into account the nodes that received hard evidence
721 // (since they do not belong to the join tree)
722 for (const auto node: set) {
723 if (!_hard_ev_nodes_.contains(node)) {
724 // the clique we are looking for is the one that was created when
725 // the first element of nodeset was eliminated
726 if (elim_order[node] < elim_number) {
727 elim_number = elim_order[node];
728 first_eliminated_node = node;
729 }
730 }
731 }
732
733 if (elim_number != std::numeric_limits< int >::max()) {
734 _joint_target_to_clique_.insert(
735 set,
736 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
737 }
738 }
739
740 // compute the roots of _JT_'s connected components
741 _computeJoinTreeRoots_();
742
743 // remove all the tensors stored into the cliques. Note that these include
744 // the CPTs resulting from the projections of hard evidence as well as the
745 // CPTs of the soft evidence
746 for (const auto& pot: _clique_ss_tensor_) {
747 if (_clique_tensors_[pot.first].size() > 1) delete pot.second;
748 }
749 _clique_ss_tensor_.clear();
750 for (const auto& potlist: _clique_tensors_)
751 for (const auto pot: potlist.second)
752 delete pot;
753 _clique_tensors_.clear();
754
755 // remove all the tensors created during the last inference
756 for (const auto& pot: _arc_to_created_tensors_)
757 delete pot.second;
758 _arc_to_created_tensors_.clear();
759
760 // remove all the tensors created to take into account hard evidence
761 // during the last inference (they have already been deleted from memory
762 // by the clearing of _clique_tensors_).
763 _node_to_hard_ev_projected_CPTs_.clear();
764
765 // remove all the soft evidence.
766 _node_to_soft_evidence_.clear();
767
768 // create empty tensor lists into the cliques of the joint tree as well
769 // as empty lists of evidence
770 _ScheduleMultiDimSet_ empty_set;
771 for (const auto node: *_JT_) {
772 _clique_tensors_.insert(node, empty_set);
773 _clique_ss_tensor_.insert(node, nullptr);
774 }
775
776 // remove all the constants created due to projections of CPTs that were
777 // defined over only hard evidence nodes
778 _constants_.clear();
779
780 // create empty messages and indicate that no message has been computed yet
781 _separator_tensors_.clear();
782 _messages_computed_.clear();
783 for (const auto& edge: _JT_->edges()) {
784 const Arc arc1(edge.first(), edge.second());
785 _separator_tensors_.insert(arc1, nullptr);
786 _messages_computed_.insert(arc1, false);
787 const Arc arc2(edge.second(), edge.first());
788 _separator_tensors_.insert(arc2, nullptr);
789 _messages_computed_.insert(arc2, false);
790 }
791
792 // remove all the posteriors computed so far
793 for (const auto& pot: _target_posteriors_)
794 delete pot.second;
795 _target_posteriors_.clear();
796 for (const auto& pot: _joint_target_posteriors_)
797 delete pot.second;
798 _joint_target_posteriors_.clear();
799
800 // here, we determine whether we should use schedules during the inference.
801 // the rule is: if the sum of the domain sizes of the cliques is greater
802 // than a threshold, use schedules
803 double overall_size = 0;
804 for (const auto clique: *_JT_) {
805 double clique_size = 1.0;
806 for (const auto node: _JT_->clique(clique))
807 clique_size *= this->domainSizes()[node];
808 overall_size += clique_size;
809 }
810 _use_schedules_ = (overall_size > _schedule_threshold_);
811
812 // we shall now add all the tensors of the soft evidence to the cliques
813 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
814 for (const auto node: this->softEvidenceNodes()) {
815 if (_node_to_clique_.exists(node)) {
816 auto ev_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node], false);
817 _node_to_soft_evidence_.insert(node, ev_pot);
818 _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
819 }
820 }
821
822 // put all the CPTs of the Bayes net nodes into the cliques
823 // here, beware: all the tensors that are defined over some nodes
824 // including hard evidence must be projected so that these nodes are
825 // removed from the tensor
826 if (_use_schedules_) {
827 Schedule schedule;
828 _initializeJTCliques_(schedule);
829 } else {
830 _initializeJTCliques_();
831 }
832
833
834 // indicate that the data structures are up to date.
835 _evidence_changes_.clear();
836 _is_new_jt_needed_ = false;
837 }
838
840 template < typename GUM_SCALAR >
841 void ShaferShenoyInference< GUM_SCALAR >::_initializeJTCliques_() {
842 const auto& bn = this->BN();
843 const DAG& dag = bn.dag();
844
845 // put all the CPTs of the Bayes net nodes into the cliques
846 // here, beware: all the tensors that are defined over some nodes
847 // including hard evidence must be projected so that these nodes are
848 // removed from the tensor
849 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
850 const NodeProperty< Idx >& hard_evidence = this->hardEvidence();
851
852 for (const auto node: dag) {
853 if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
854 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
855
856 // get the list of nodes with hard evidence in cpt
857 NodeSet hard_nodes;
858 const auto& variables = cpt.variablesSequence();
859 bool graph_contains_nodes = false;
860 for (const auto var: variables) {
861 NodeId xnode = bn.nodeId(*var);
862 if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
863 else if (_graph_.exists(xnode)) graph_contains_nodes = true;
864 }
865
866 // if hard_nodes contains hard evidence nodes, perform a projection
867 // and insert the result into the appropriate clique, else insert
868 // directly cpt into the clique
869 if (hard_nodes.empty()) {
870 auto sched_cpt = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt, false);
871 _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
872 } else {
873 // marginalize out the hard evidence nodes: if the cpt is defined
874 // only over nodes that received hard evidence, do not consider it
875 // as a tensor anymore but as a constant
876 // TODO substitute constants by 0-dimensional tensors
877 if (hard_nodes.size() == variables.size()) {
878 Instantiation inst(cpt);
879 for (Size i = 0; i < hard_nodes.size(); ++i) {
880 inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
881 }
882 _constants_.insert(node, cpt.get(inst));
883 } else {
884 // here, we have a CPT defined over some nodes that received hard
885 // evidence and other nodes that did not receive it. If none of the
886 // latter belong to the graph, then the CPT is useless for inference
887 if (!graph_contains_nodes) continue;
888
889 // prepare the projection with a combine and project instance
890 gum::VariableSet hard_variables;
891 _TensorSet_ marg_cpt_set(1 + hard_nodes.size());
892 marg_cpt_set.insert(&cpt);
893 for (const auto xnode: hard_nodes) {
894 marg_cpt_set.insert(evidence[xnode]);
895 hard_variables.insert(&(bn.variable(xnode)));
896 }
897
898 // perform the combination of those tensors and their projection
899 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
900 _combination_op_,
901 _projection_op_);
902
903 _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);
904
905 // there should be only one tensor in new_cpt_list
906 if (new_cpt_list.size() != 1) {
907 for (const auto pot: new_cpt_list)
908 if (!marg_cpt_set.contains(pot)) delete pot;
909
911 "the projection of a tensor containing " << "hard evidence is empty!");
912 }
913 auto new_pot = const_cast< Tensor< GUM_SCALAR >* >(*(new_cpt_list.begin()));
914 auto projected_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*new_pot));
915 delete new_pot;
916
917 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
918 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
919 }
920 }
921 }
922 }
923
924 // now, in _clique_tensors_, for each clique, we have the list of
925 // tensors that must be combined in order to produce the Shafer-Shenoy's
926 // tensor stored into the clique. So, perform this combination and
927 // store the result in _clique_ss_tensor_
928 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
929 for (const auto& xpotset: _clique_tensors_) {
930 const auto& potset = xpotset.second;
931 if (potset.size() > 0) {
932 // here, there will be an entry in _clique_ss_tensor_
933 // If there is only one element in potset, this element shall be
934 // stored into _clique_ss_tensor_, else all the elements of potset
935 // shall be combined and their result shall be stored
936 if (potset.size() == 1) {
937 _clique_ss_tensor_[xpotset.first] = *(potset.cbegin());
938 } else {
939 _TensorSet_ p_potset(potset.size());
940 for (const auto pot: potset)
941 p_potset.insert(
942 &(static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot)->multiDim()));
943
944 Tensor< GUM_SCALAR >* joint
945 = const_cast< Tensor< GUM_SCALAR >* >(fast_combination.execute(p_potset));
946 _clique_ss_tensor_[xpotset.first]
947 = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*joint));
948 delete joint;
949 }
950 }
951 }
952 }
953
955 template < typename GUM_SCALAR >
956 void ShaferShenoyInference< GUM_SCALAR >::_initializeJTCliques_(Schedule& schedule) {
957 const auto& bn = this->BN();
958 const DAG& dag = bn.dag();
959
960 // put all the CPTs of the Bayes net nodes into the cliques
961 // here, beware: all the tensors that are defined over some nodes
962 // including hard evidence must be projected so that these nodes are
963 // removed from the tensor
964 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
965 const NodeProperty< Idx >& hard_evidence = this->hardEvidence();
966
967 for (const auto node: dag) {
968 if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
969 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
970
971 // get the list of nodes with hard evidence in cpt
972 NodeSet hard_nodes;
973 const auto& variables = cpt.variablesSequence();
974 bool graph_contains_nodes = false;
975 for (const auto var: variables) {
976 NodeId xnode = bn.nodeId(*var);
977 if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
978 else if (_graph_.exists(xnode)) graph_contains_nodes = true;
979 }
980
981 // if hard_nodes contains hard evidence nodes, perform a projection
982 // and insert the result into the appropriate clique, else insert
983 // directly cpt into the clique
984 if (hard_nodes.empty()) {
985 auto sched_cpt = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt, false);
986 _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
987 } else {
988 // marginalize out the hard evidence nodes: if the cpt is defined
989 // only over nodes that received hard evidence, do not consider it
990 // as a tensor anymore but as a constant
991 // TODO substitute constants by 0-dimensional tensors
992 if (hard_nodes.size() == variables.size()) {
993 Instantiation inst(cpt);
994 for (Size i = 0; i < hard_nodes.size(); ++i) {
995 inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
996 }
997 _constants_.insert(node, cpt.get(inst));
998 } else {
999 // here, we have a CPT defined over some nodes that received hard
1000 // evidence and other nodes that did not receive it. If none of the
1001 // latter belong to the graph, then the CPT is useless for inference
1002 if (!graph_contains_nodes) continue;
1003
1004 // prepare the projection with a combine and project instance
1005 gum::VariableSet hard_variables;
1006 _ScheduleMultiDimSet_ marg_cpt_set(1 + hard_nodes.size());
1007 const IScheduleMultiDim* sched_cpt
1008 = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt, false);
1009 marg_cpt_set.insert(sched_cpt);
1010
1011 for (const auto xnode: hard_nodes) {
1012 const IScheduleMultiDim* pot
1013 = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode], false);
1014 marg_cpt_set.insert(pot);
1015 hard_variables.insert(&(bn.variable(xnode)));
1016 }
1017
1018 // perform the combination of those tensors and their projection
1019 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1020 _combination_op_,
1021 _projection_op_);
1022
1023 _ScheduleMultiDimSet_ new_cpt_list
1024 = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);
1025
1026 // there should be only one tensor in new_cpt_list
1027 if (new_cpt_list.size() != 1) {
1029 "the projection of a tensor containing " << "hard evidence is empty!");
1030 }
1031 auto projected_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1032 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1033 *new_cpt_list.begin()));
1034 const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(projected_pot))
1035 ->makeResultsPersistent(true);
1036 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1037 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
1038 }
1039 }
1040 }
1041 }
1042 this->scheduler().execute(schedule);
1043
1044 // now, in _clique_tensors_, for each clique, we have the list of
1045 // tensors that must be combined in order to produce the Shafer-Shenoy's
1046 // tensor stored into the clique. So, perform this combination and
1047 // store the result in _clique_ss_tensor_
1048 schedule.clear();
1049 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1050 for (const auto& xpotset: _clique_tensors_) {
1051 const auto& potset = xpotset.second;
1052 if (potset.size() > 0) {
1053 // here, there will be an entry in _clique_ss_tensor_
1054 // If there is only one element in potset, this element shall be
1055 // stored into _clique_ss_tensor_, else all the elements of potset
1056 // shall be combined and their result shall be stored
1057 if (potset.size() == 1) {
1058 _clique_ss_tensor_[xpotset.first] = *(potset.cbegin());
1059 } else {
1060 // add the tables to combine into the schedule
1061 for (const auto pot: potset) {
1062 schedule.emplaceScheduleMultiDim(*pot);
1063 }
1064
1065 auto joint = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1066 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1067 fast_combination.schedule(schedule, potset)));
1068 const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(joint))
1069 ->makeResultsPersistent(true);
1070 _clique_ss_tensor_[xpotset.first] = joint;
1071 }
1072 }
1073 }
1074 this->scheduler().execute(schedule);
1075 }
1076
1078 template < typename GUM_SCALAR >
1079 void ShaferShenoyInference< GUM_SCALAR >::updateOutdatedStructure_() {
1080 // check if a new JT is really needed. If so, create it
1081 if (_isNewJTNeeded_()) {
1082 _createNewJT_();
1083 } else {
1084 // here, we can answer the next queries without reconstructing all the
1085 // junction tree. All we need to do is to indicate that we should
1086 // update the tensors and messages for these queries
1087 updateOutdatedTensors_();
1088 }
1089 }
1090
1092 template < typename GUM_SCALAR >
1093 void ShaferShenoyInference< GUM_SCALAR >::_diffuseMessageInvalidations_(
1094 NodeId from_id,
1095 NodeId to_id,
1096 NodeSet& invalidated_cliques) {
1097 // invalidate the current clique
1098 invalidated_cliques.insert(to_id);
1099
1100 // invalidate the current arc
1101 const Arc arc(from_id, to_id);
1102 bool& message_computed = _messages_computed_[arc];
1103 if (message_computed) {
1104 message_computed = false;
1105 _separator_tensors_[arc] = nullptr;
1106 if (_arc_to_created_tensors_.exists(arc)) {
1107 delete _arc_to_created_tensors_[arc];
1108 _arc_to_created_tensors_.erase(arc);
1109 }
1110
1111 // go on with the diffusion
1112 for (const auto node_id: _JT_->neighbours(to_id)) {
1113 if (node_id != from_id) _diffuseMessageInvalidations_(to_id, node_id, invalidated_cliques);
1114 }
1115 }
1116 }
1117
1120 template < typename GUM_SCALAR >
1121 void ShaferShenoyInference< GUM_SCALAR >::updateOutdatedTensors_() {
1122 // for each clique, indicate whether the tensor stored into
1123 // _clique_ss_tensor_[clique] is the result of a combination. In this
1124 // case, it has been allocated by the combination and will need to be
1125 // deallocated if its clique has been invalidated
1126 NodeProperty< bool > ss_tensor_to_deallocate(_clique_tensors_.size());
1127 for (const auto& potset: _clique_tensors_) {
1128 ss_tensor_to_deallocate.insert(potset.first, (potset.second.size() > 1));
1129 }
1130
1131 // compute the set of CPTs that were projected due to hard evidence and
1132 // whose hard evidence have changed, so that they need a new projection.
1133 // By the way, remove these CPTs since they are no more needed
1134 // Here only the values of the hard evidence can have changed (else a
1135 // fully new join tree would have been computed).
1136 // Note also that we know that the CPTs still contain some variable(s) after
1137 // the projection (else they should be constants)
1138 NodeSet hard_nodes_changed(_hard_ev_nodes_.size());
1139 for (const auto node: _hard_ev_nodes_)
1140 if (_evidence_changes_.exists(node)) hard_nodes_changed.insert(node);
1141
1142 NodeSet nodes_with_projected_CPTs_changed;
1143 const auto& bn = this->BN();
1144 for (auto pot_iter = _node_to_hard_ev_projected_CPTs_.beginSafe();
1145 pot_iter != _node_to_hard_ev_projected_CPTs_.endSafe();
1146 ++pot_iter) {
1147 for (const auto var: bn.cpt(pot_iter.key()).variablesSequence()) {
1148 if (hard_nodes_changed.contains(bn.nodeId(*var))) {
1149 nodes_with_projected_CPTs_changed.insert(pot_iter.key());
1150 delete pot_iter.val();
1151 _clique_tensors_[_node_to_clique_[pot_iter.key()]].erase(pot_iter.val());
1152 _node_to_hard_ev_projected_CPTs_.erase(pot_iter);
1153 break;
1154 }
1155 }
1156 }
1157
1158
1159 // invalidate all the messages that are no more correct: start from each of
1160 // the nodes whose soft evidence has changed and perform a diffusion from
1161 // the clique into which the soft evidence has been entered, indicating that
1162 // the messages spreading from this clique are now invalid. At the same time,
1163 // if there were tensors created on the arcs over which the messages were
1164 // sent, remove them from memory. For all the cliques that received some
1165 // projected CPT that should now be changed, do the same.
1166 NodeSet invalidated_cliques(_JT_->size());
1167 for (const auto& pair: _evidence_changes_) {
1168 if (_node_to_clique_.exists(pair.first)) {
1169 const auto clique = _node_to_clique_[pair.first];
1170 invalidated_cliques.insert(clique);
1171 for (const auto neighbor: _JT_->neighbours(clique)) {
1172 _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
1173 }
1174 }
1175 }
1176
1177 // now, add to the set of invalidated cliques those that contain projected
1178 // CPTs that were changed.
1179 for (const auto node: nodes_with_projected_CPTs_changed) {
1180 const auto clique = _node_to_clique_[node];
1181 invalidated_cliques.insert(clique);
1182 for (const auto neighbor: _JT_->neighbours(clique)) {
1183 _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
1184 }
1185 }
1186
1187 // now that we know the cliques whose set of tensors have been changed,
1188 // we can discard their corresponding Shafer-Shenoy tensor
1189 for (const auto clique: invalidated_cliques) {
1190 if (ss_tensor_to_deallocate[clique]) {
1191 delete _clique_ss_tensor_[clique];
1192 _clique_ss_tensor_[clique] = nullptr;
1193 }
1194 }
1195
1196
1197 // now we shall remove all the posteriors that belong to the
1198 // invalidated cliques. First, cope only with the nodes that did not
1199 // receive hard evidence since the other nodes do not belong to the
1200 // join tree
1201 for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
1202 ++iter) {
1203 if (_graph_.exists(iter.key())
1204 && (invalidated_cliques.exists(_node_to_clique_[iter.key()]))) {
1205 delete iter.val();
1206 _target_posteriors_.erase(iter);
1207 }
1208 }
1209
1210 // now cope with the nodes that received hard evidence
1211 for (auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
1212 ++iter) {
1213 if (hard_nodes_changed.contains(iter.key())) {
1214 delete iter.val();
1215 _target_posteriors_.erase(iter);
1216 }
1217 }
1218
1219 // finally, cope with joint targets. Notably, remove the joint posteriors whose
1220 // nodes have all received changed evidence
1221 for (auto iter = _joint_target_posteriors_.beginSafe();
1222 iter != _joint_target_posteriors_.endSafe();
1223 ++iter) {
1224 if (invalidated_cliques.exists(_joint_target_to_clique_[iter.key()])) {
1225 delete iter.val();
1226 _joint_target_posteriors_.erase(iter);
1227 } else {
1228 // check for sets in which all nodes have received evidence
1229 bool has_unevidenced_node = false;
1230 for (const auto node: iter.key()) {
1231 if (!hard_nodes_changed.exists(node)) {
1232 has_unevidenced_node = true;
1233 break;
1234 }
1235 }
1236 if (!has_unevidenced_node) {
1237 delete iter.val();
1238 _joint_target_posteriors_.erase(iter);
1239 }
1240 }
1241 }
1242
1243 // remove all the evidence that were entered into _node_to_soft_evidence_
1244 // and _clique_ss_tensor_ and add the new soft ones
1245 for (const auto& pot_pair: _node_to_soft_evidence_) {
1246 delete pot_pair.second;
1247 _clique_tensors_[_node_to_clique_[pot_pair.first]].erase(pot_pair.second);
1248 }
1249 _node_to_soft_evidence_.clear();
1250
1251 const auto& evidence = this->evidence();
1252 for (const auto node: this->softEvidenceNodes()) {
1253 auto ev_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node], false);
1254 _node_to_soft_evidence_.insert(node, ev_pot);
1255 _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
1256 }
1257
1258
1259 // Now add the projections of the CPTs due to newly changed hard evidence:
1260 // if we are performing updateOutdatedTensors_, this means that the
1261 // set of nodes that received hard evidence has not changed, only
1262 // their instantiations can have changed. So, if there is an entry
1263 // for node in _constants_, there will still be such an entry after
1264 // performing the new projections. Idem for _node_to_hard_ev_projected_CPTs_
1265 if (_use_schedules_) {
1266 Schedule schedule;
1267 for (const auto node: nodes_with_projected_CPTs_changed) {
1268 // perform the projection with a combine and project instance
1269 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
1270 const auto& variables = cpt.variablesSequence();
1271 _ScheduleMultiDimSet_ marg_cpt_set;
1272 const auto sched_cpt = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt, false);
1273 marg_cpt_set.insert(sched_cpt);
1274
1275 gum::VariableSet hard_variables;
1276 for (const auto var: variables) {
1277 NodeId xnode = bn.nodeId(*var);
1278 if (_hard_ev_nodes_.exists(xnode)) {
1279 const auto pot = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode], false);
1280 marg_cpt_set.insert(pot);
1281 hard_variables.insert(var);
1282 }
1283 }
1284
1285 // perform the combination of those tensors and their projection
1286 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1287 _combination_op_,
1288 _projection_op_);
1289
1290 _ScheduleMultiDimSet_ new_cpt_list
1291 = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);
1292
1293 // there should be only one tensor in new_cpt_list
1294 if (new_cpt_list.size() != 1) {
1296 "the projection of a tensor containing " << "hard evidence is empty!");
1297 }
1298 auto projected_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1299 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(*new_cpt_list.begin()));
1300 const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(projected_pot))
1301 ->makeResultsPersistent(true);
1302 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1303 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
1304 }
1305
1306 // here, the list of tensors stored in the invalidated cliques have
1307 // been updated. So, now, we can combine them to produce the Shafer-Shenoy
1308 // tensor stored into the clique
1309 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1310 for (const auto clique: invalidated_cliques) {
1311 const auto& potset = _clique_tensors_[clique];
1312
1313 if (potset.size() > 0) {
1314 // here, there will be an entry in _clique_ss_tensor_
1315 // If there is only one element in potset, this element shall be
1316 // stored into _clique_ss_tensor_, else all the elements of potset
1317 // shall be combined and their result shall be stored
1318 if (potset.size() == 1) {
1319 _clique_ss_tensor_[clique] = *(potset.cbegin());
1320 } else {
1321 for (const auto pot: potset)
1322 if (!schedule.existsScheduleMultiDim(pot->id()))
1323 schedule.emplaceScheduleMultiDim(*pot);
1324 auto joint = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1325 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
1326 fast_combination.schedule(schedule, potset)));
1327 const_cast< ScheduleOperator* >(schedule.scheduleMultiDimCreator(joint))
1328 ->makeResultsPersistent(true);
1329 _clique_ss_tensor_[clique] = joint;
1330 }
1331 }
1332 }
1333 this->scheduler().execute(schedule);
1334 } else {
1335 for (const auto node: nodes_with_projected_CPTs_changed) {
1336 // perform the projection with a combine and project instance
1337 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
1338 const auto& variables = cpt.variablesSequence();
1339 _TensorSet_ marg_cpt_set(1 + variables.size());
1340 marg_cpt_set.insert(&cpt);
1341
1342 gum::VariableSet hard_variables;
1343 for (const auto var: variables) {
1344 NodeId xnode = bn.nodeId(*var);
1345 if (_hard_ev_nodes_.exists(xnode)) {
1346 marg_cpt_set.insert(evidence[xnode]);
1347 hard_variables.insert(var);
1348 }
1349 }
1350
1351 // perform the combination of those tensors and their projection
1352 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1353 _combination_op_,
1354 _projection_op_);
1355
1356 _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);
1357
1358 // there should be only one tensor in new_cpt_list
1359 if (new_cpt_list.size() != 1) {
1360 for (const auto pot: new_cpt_list)
1361 if (!marg_cpt_set.contains(pot)) delete pot;
1362
1364 "the projection of a tensor containing " << "hard evidence is empty!");
1365 }
1366 Tensor< GUM_SCALAR >* xprojected_pot
1367 = const_cast< Tensor< GUM_SCALAR >* >(*new_cpt_list.begin());
1368 auto projected_pot
1369 = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*xprojected_pot));
1370 delete xprojected_pot;
1371 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1372 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
1373 }
1374
1375 // here, the list of tensors stored in the invalidated cliques have
1376 // been updated. So, now, we can combine them to produce the Shafer-Shenoy
1377 // tensor stored into the clique
1378 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1379 for (const auto clique: invalidated_cliques) {
1380 const auto& potset = _clique_tensors_[clique];
1381
1382 if (potset.size() > 0) {
1383 // here, there will be an entry in _clique_ss_tensor_
1384 // If there is only one element in potset, this element shall be
1385 // stored into _clique_ss_tensor_, else all the elements of potset
1386 // shall be combined and their result shall be stored
1387 if (potset.size() == 1) {
1388 _clique_ss_tensor_[clique] = *(potset.cbegin());
1389 } else {
1390 _TensorSet_ p_potset(potset.size());
1391 for (const auto pot: potset)
1392 p_potset.insert(&(
1393 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot)->multiDim()));
1394
1395 Tensor< GUM_SCALAR >* joint
1396 = const_cast< Tensor< GUM_SCALAR >* >(fast_combination.execute(p_potset));
1397 _clique_ss_tensor_[clique]
1398 = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*joint));
1399 delete joint;
1400 }
1401 }
1402 }
1403 }
1404
1405 // update the constants
1406 const auto& hard_evidence = this->hardEvidence();
1407 for (auto& node_cst: _constants_) {
1408 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
1409 const auto& variables = cpt.variablesSequence();
1410 Instantiation inst;
1411 for (const auto var: variables)
1412 inst << *var;
1413 for (const auto var: variables) {
1414 inst.chgVal(*var, hard_evidence[bn.nodeId(*var)]);
1415 }
1416 node_cst.second = cpt.get(inst);
1417 }
1418
1419 // indicate that all changes have been performed
1420 _evidence_changes_.clear();
1421 }
1422
1424 template < typename GUM_SCALAR >
1425 void ShaferShenoyInference< GUM_SCALAR >::_computeJoinTreeRoots_() {
1426 // get the set of cliques in which we can find the targets and joint_targets.
1427 // Due to hard evidence, the cliques related to a given target node
1428 // might not exist, hence the try..catch.
1429 NodeSet clique_targets;
1430 for (const auto node: this->targets()) {
1431 try {
1432 clique_targets.insert(_node_to_clique_[node]);
1433 } catch (Exception const&) {}
1434 }
1435 for (const auto& set: this->jointTargets()) {
1436 try {
1437 clique_targets.insert(_joint_target_to_clique_[set]);
1438 } catch (Exception const&) {}
1439 }
1440
1441 // put in a vector these cliques and their sizes
1442 std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
1443 const auto& bn = this->BN();
1444 std::size_t i = 0;
1445 for (const auto clique_id: clique_targets) {
1446 const auto& clique = _JT_->clique(clique_id);
1447 Size dom_size = 1;
1448 for (const auto node: clique) {
1449 dom_size *= bn.variable(node).domainSize();
1450 }
1451 possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
1452 ++i;
1453 }
1454
1455 // sort the cliques by increasing domain size
1456 std::sort(possible_roots.begin(),
1457 possible_roots.end(),
1458 [](const std::pair< NodeId, Size >& a, const std::pair< NodeId, Size >& b) -> bool {
1459 return a.second < b.second;
1460 });
1461
1462 // pick up the clique with the smallest size in each connected component
1463 NodeProperty< bool > marked = _JT_->nodesPropertyFromVal(false);
1464 std::function< void(NodeId, NodeId) > diffuse_marks
1465 = [&marked, &diffuse_marks, this](NodeId node, NodeId from) {
1466 if (!marked[node]) {
1467 marked[node] = true;
1468 for (const auto neigh: _JT_->neighbours(node))
1469 if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
1470 }
1471 };
1472 _roots_.clear();
1473 for (const auto& xclique: possible_roots) {
1474 NodeId clique = xclique.first;
1475 if (!marked[clique]) {
1476 _roots_.insert(clique);
1477 diffuse_marks(clique, clique);
1478 }
1479 }
1480 }
1481
 1482 // find the tensors d-connected to a set of variables: FIND_ALL strategy.
 // Every tensor is considered relevant, so pot_list is deliberately left
 // untouched and kept_vars is ignored: this overload is a no-op.
 1483 template < typename GUM_SCALAR >
 1484 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsGetAll_(
 1485 Set< const IScheduleMultiDim* >& pot_list,
 1486 gum::VariableSet& kept_vars) {}
1487
1488 // find the tensors d-connected to a set of variables
1489 template < typename GUM_SCALAR >
1490 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_(
1491 Set< const IScheduleMultiDim* >& pot_list,
1492 gum::VariableSet& kept_vars) {
1493 // find the node ids of the kept variables
1494 NodeSet kept_ids(kept_vars.size());
1495 const auto& bn = this->BN();
1496 for (const auto var: kept_vars) {
1497 kept_ids.insert(bn.nodeId(*var));
1498 }
1499
1500 // determine the set of tensors d-connected with the kept variables
1501 NodeSet requisite_nodes;
1502 BayesBall::requisiteNodes(bn.dag(),
1503 kept_ids,
1504 this->hardEvidenceNodes(),
1505 this->softEvidenceNodes(),
1506 requisite_nodes);
1507 for (auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
1508 const Sequence< const DiscreteVariable* >& vars = (*iter)->variablesSequence();
1509 bool found = false;
1510 for (const auto var: vars) {
1511 if (requisite_nodes.exists(bn.nodeId(*var))) {
1512 found = true;
1513 break;
1514 }
1515 }
1516
1517 if (!found) { pot_list.erase(iter); }
1518 }
1519 }
1520
1521 // find the tensors d-connected to a set of variables
1522 template < typename GUM_SCALAR >
1523 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_(
1524 Set< const IScheduleMultiDim* >& pot_list,
1525 gum::VariableSet& kept_vars) {
1526 // find the node ids of the kept variables
1527 NodeSet kept_ids(kept_vars.size());
1528 const auto& bn = this->BN();
1529 for (const auto var: kept_vars) {
1530 kept_ids.insert(bn.nodeId(*var));
1531 }
1532
1533 // determine the set of tensors d-connected with the kept variables
1534 BayesBall::relevantTensors(bn,
1535 kept_ids,
1536 this->hardEvidenceNodes(),
1537 this->softEvidenceNodes(),
1538 pot_list);
1539 }
1540
1541 // find the tensors d-connected to a set of variables
1542 template < typename GUM_SCALAR >
1543 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_(
1544 Set< const IScheduleMultiDim* >& pot_list,
1545 gum::VariableSet& kept_vars) {
1546 // find the node ids of the kept variables
1547 NodeSet kept_ids(kept_vars.size());
1548 const auto& bn = this->BN();
1549 for (const auto var: kept_vars) {
1550 kept_ids.insert(bn.nodeId(*var));
1551 }
1552
1553 // determine the set of tensors d-connected with the kept variables
1554 dSeparationAlgorithm dsep;
1555 dsep.relevantTensors(bn,
1556 kept_ids,
1557 this->hardEvidenceNodes(),
1558 this->softEvidenceNodes(),
1559 pot_list);
1560 }
1561
1562 // find the tensors d-connected to a set of variables
1563 template < typename GUM_SCALAR >
1564 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsXX_(
1565 Set< const IScheduleMultiDim* >& pot_list,
1566 gum::VariableSet& kept_vars) {
1567 switch (_find_relevant_tensor_type_) {
1568 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
1569 _findRelevantTensorsWithdSeparation2_(pot_list, kept_vars);
1570 break;
1571
1572 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
1573 _findRelevantTensorsWithdSeparation_(pot_list, kept_vars);
1574 break;
1575
1576 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
1577 _findRelevantTensorsWithdSeparation3_(pot_list, kept_vars);
1578 break;
1579
1580 case RelevantTensorsFinderType::FIND_ALL :
1581 _findRelevantTensorsGetAll_(pot_list, kept_vars);
1582 break;
1583
1584 default : GUM_ERROR(FatalError, "not implemented yet")
1585 }
1586 }
1587
1588 // remove barren variables using schedules
1589 template < typename GUM_SCALAR >
1590 Set< const IScheduleMultiDim* >
1591 ShaferShenoyInference< GUM_SCALAR >::_removeBarrenVariables_(Schedule& schedule,
1592 _ScheduleMultiDimSet_& pot_list,
1593 gum::VariableSet& del_vars) {
1594 // remove from del_vars the variables that received some evidence:
1595 // only those that did not receive evidence can be barren variables
1596 gum::VariableSet the_del_vars = del_vars;
1597 for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe(); ++iter) {
1598 NodeId id = this->BN().nodeId(**iter);
1599 if (this->hardEvidenceNodes().exists(id) || this->softEvidenceNodes().exists(id)) {
1600 the_del_vars.erase(iter);
1601 }
1602 }
1603
1604 // assign to each random variable the set of tensors that contain it
1605 HashTable< const DiscreteVariable*, _ScheduleMultiDimSet_ > var2pots(the_del_vars.size());
1606 _ScheduleMultiDimSet_ empty_pot_set;
1607 for (const auto pot: pot_list) {
1608 const auto& vars = pot->variablesSequence();
1609 for (const auto var: vars) {
1610 if (the_del_vars.exists(var)) {
1611 if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1612 var2pots[var].insert(pot);
1613 }
1614 }
1615 }
1616
1617 // each variable with only one tensor is necessarily a barren variable
1618 // assign to each tensor with barren nodes its set of barren variables
1619 HashTable< const IScheduleMultiDim*, gum::VariableSet > pot2barren_var;
1620 gum::VariableSet empty_var_set;
1621 for (const auto& elt: var2pots) {
1622 if (elt.second.size() == 1) { // here we have a barren variable
1623 const IScheduleMultiDim* pot = *(elt.second.begin());
1624 if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
1625 pot2barren_var[pot].insert(elt.first); // insert the barren variable
1626 }
1627 }
1628
1629 // for each tensor with barren variables, marginalize them.
1630 // if the tensor has only barren variables, simply remove them from the
1631 // set of tensors, else just project the tensor
1632 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1633 _ScheduleMultiDimSet_ projected_pots;
1634 for (const auto& elt: pot2barren_var) {
1635 // remove the current tensor from pot_list as, anyway, we will change it
1636 const IScheduleMultiDim* pot = elt.first;
1637 pot_list.erase(pot);
1638
1639 // check whether we need to add a projected new tensor or not (i.e.,
1640 // whether there exist non-barren variables or not)
1641 if (pot->variablesSequence().size() != elt.second.size()) {
1642 const IScheduleMultiDim* new_pot = projector.schedule(schedule, pot, elt.second);
1643 // here, there is no need to enforce that new_pot is persistent since,
1644 // if this is needed, the function that called _removeBarrenVariables_ will
1645 // do it
1646 pot_list.insert(new_pot);
1647 projected_pots.insert(new_pot);
1648 }
1649 }
1650
1651 return projected_pots;
1652 }
1653
  // remove barren variables directly without schedules
  //
  // A variable of del_vars is "barren" w.r.t. pot_list when it did not
  // receive any evidence and appears in exactly one tensor of pot_list:
  // summing it out can then be done on that single tensor, independently of
  // all the other ones. This method replaces such tensors in pot_list by
  // their projections (or simply removes them when all their variables are
  // barren) and returns the set of newly allocated projected tensors, so
  // that the caller can dispose of those that turn out to be temporary
  // (see the schedule-free _marginalizeOut_, which deletes them).
  template < typename GUM_SCALAR >
  Set< const Tensor< GUM_SCALAR >* >
      ShaferShenoyInference< GUM_SCALAR >::_removeBarrenVariables_(_TensorSet_& pot_list,
                                                                   gum::VariableSet& del_vars) {
    // remove from del_vars the variables that received some evidence:
    // only those that did not receive evidence can be barren variables
    gum::VariableSet the_del_vars = del_vars;
    for (auto iter = the_del_vars.beginSafe(); iter != the_del_vars.endSafe(); ++iter) {
      NodeId id = this->BN().nodeId(**iter);
      if (this->hardEvidenceNodes().exists(id) || this->softEvidenceNodes().exists(id)) {
        // safe iterators remain usable after erasures, so erasing while
        // iterating is ok here
        the_del_vars.erase(iter);
      }
    }

    // assign to each random variable the set of tensors that contain it
    HashTable< const DiscreteVariable*, _TensorSet_ > var2pots(the_del_vars.size());
    _TensorSet_ empty_pot_set;
    for (const auto pot: pot_list) {
      const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
      for (const auto var: vars) {
        if (the_del_vars.exists(var)) {
          if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
          var2pots[var].insert(pot);
        }
      }
    }

    // each variable with only one tensor is a barren variable
    // assign to each tensor with barren nodes its set of barren variables
    HashTable< const Tensor< GUM_SCALAR >*, gum::VariableSet > pot2barren_var;
    gum::VariableSet empty_var_set;
    for (const auto& elt: var2pots) {
      if (elt.second.size() == 1) {   // here we have a barren variable
        const Tensor< GUM_SCALAR >* pot = *(elt.second.begin());
        if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
        pot2barren_var[pot].insert(elt.first);   // insert the barren variable
      }
    }

    // for each tensor with barren variables, marginalize them.
    // if the tensor has only barren variables, simply remove them from the
    // set of tensors, else just project the tensor
    MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
    _TensorSet_ projected_pots;
    for (const auto& elt: pot2barren_var) {
      // remove the current tensor from pot_list as, anyway, we will change it
      const Tensor< GUM_SCALAR >* pot = elt.first;
      pot_list.erase(pot);

      // check whether we need to add a projected new tensor or not (i.e.,
      // whether there exist non-barren variables or not)
      if (pot->variablesSequence().size() != elt.second.size()) {
        // the projection allocates a new tensor; record it into
        // projected_pots so the caller can delete it when appropriate
        const Tensor< GUM_SCALAR >* new_pot = projector.execute(*pot, elt.second);
        pot_list.insert(new_pot);
        projected_pots.insert(new_pot);
      }
    }

    return projected_pots;
  }
1715
1716 // performs the collect phase of Shafer-Shenoy using schedules
1717 template < typename GUM_SCALAR >
1718 INLINE void ShaferShenoyInference< GUM_SCALAR >::_collectMessage_(Schedule& schedule,
1719 NodeId id,
1720 NodeId from) {
1721 for (const auto other: _JT_->neighbours(id)) {
1722 if ((other != from) && !_messages_computed_[Arc(other, id)])
1723 _collectMessage_(schedule, other, id);
1724 }
1725
1726 if ((id != from) && !_messages_computed_[Arc(id, from)]) {
1727 _produceMessage_(schedule, id, from);
1728 }
1729 }
1730
1731 // performs the collect phase of Shafer-Shenoy without schedules
1732 template < typename GUM_SCALAR >
1733 INLINE void ShaferShenoyInference< GUM_SCALAR >::_collectMessage_(NodeId id, NodeId from) {
1734 for (const auto other: _JT_->neighbours(id)) {
1735 if ((other != from) && !_messages_computed_[Arc(other, id)]) _collectMessage_(other, id);
1736 }
1737
1738 if ((id != from) && !_messages_computed_[Arc(id, from)]) { _produceMessage_(id, from); }
1739 }
1740
  // remove variables del_vars from the list of tensors pot_list
  //
  // Combines all the tensors of pot_list and projects the result so that
  // only the variables of kept_vars remain. The operations are only recorded
  // into the schedule, they are not executed here. The returned pointer is
  // either an element of pot_list, the result of a scheduled operation or,
  // when pot_list/new_pot_list is empty, a freshly allocated empty
  // ScheduleMultiDim (presumably to be deallocated by the caller — note
  // that the caller checks `pot_list.exists(result)` before keeping it).
  // note: pot_list is deliberately passed by value: barren-variable removal
  // mutates only the local copy.
  template < typename GUM_SCALAR >
  const IScheduleMultiDim* ShaferShenoyInference< GUM_SCALAR >::_marginalizeOut_(
      Schedule& schedule,
      Set< const IScheduleMultiDim* > pot_list,
      gum::VariableSet& del_vars,
      gum::VariableSet& kept_vars) {
    // use d-separation analysis to check which tensors shall be combined
    //_findRelevantTensorsXX_(pot_list, kept_vars);

    // if pot list is empty, do nothing. This may happen when there are only barren variables
    if (pot_list.empty()) {
      return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
    }

    // now, let's guarantee that all the tensors to be combined and projected
    // belong to the schedule
    for (const auto pot: pot_list) {
      if (!schedule.existsScheduleMultiDim(pot->id())) schedule.emplaceScheduleMultiDim(*pot);
    }

    // remove the tensors corresponding to barren variables if we want
    // to exploit barren nodes
    _ScheduleMultiDimSet_ barren_projected_tensors;
    if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_tensors = _removeBarrenVariables_(schedule, pot_list, del_vars);
    }

    // Combine and project the tensors
    _ScheduleMultiDimSet_ new_pot_list;
    if (pot_list.size() == 1) {   // only one tensor, so just project it
      MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
      auto xpot = projector.schedule(schedule, *(pot_list.begin()), del_vars);
      new_pot_list.insert(xpot);
    } else if (pot_list.size() > 1) {
      // create a combine and project operator that will perform the
      // marginalization
      MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
                                                                                   _projection_op_);
      new_pot_list = combine_and_project.schedule(schedule, pot_list, del_vars);
    }

    // remove all the tensors that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // tensors were just temporary tensors
    for (auto barren_pot: barren_projected_tensors) {
      if (!new_pot_list.exists(barren_pot))
        // schedule their deletion rather than deleting them now: they are
        // owned by the schedule
        schedule.emplaceDeletion(
            static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >& >(*barren_pot));
    }

    // combine all the remaining tensors in order to create only one resulting tensor
    if (new_pot_list.empty())
      return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
    if (new_pot_list.size() == 1) return *(new_pot_list.begin());
    MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
    return fast_combination.schedule(schedule, new_pot_list);
  }
1799
  // remove variables del_vars from the list of tensors pot_list
  //
  // Schedule-free counterpart of the method above: the tensors are combined
  // and projected immediately. The result is always wrapped into a newly
  // allocated ScheduleMultiDim (res_pot), which the caller is expected to
  // deallocate.
  template < typename GUM_SCALAR >
  const IScheduleMultiDim* ShaferShenoyInference< GUM_SCALAR >::_marginalizeOut_(
      Set< const IScheduleMultiDim* >& pot_list,
      gum::VariableSet& del_vars,
      gum::VariableSet& kept_vars) {
    // if pot list is empty, do nothing. This may happen when there are only barren variables
    if (pot_list.empty()) {
      return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
    }

    // extract the raw tensors wrapped inside the ScheduleMultiDims
    _TensorSet_ xpot_list(pot_list.size());
    for (auto pot: pot_list)
      xpot_list.insert(
          &(static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot)->multiDim()));

    // use d-separation analysis to check which tensors shall be combined
    // _findRelevantTensorsXX_(pot_list, kept_vars);

    // remove the tensors corresponding to barren variables if we want
    // to exploit barren nodes
    _TensorSet_ barren_projected_tensors;
    if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
      barren_projected_tensors = _removeBarrenVariables_(xpot_list, del_vars);
    }

    // Combine and project the remaining tensors
    _TensorSet_ xnew_pot_list;
    if (xpot_list.size() == 1) {
      // only one tensor remains, so just project it
      MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
      auto xpot = projector.execute(**(xpot_list.begin()), del_vars);
      xnew_pot_list.insert(xpot);
    } else if (xpot_list.size() > 1) {
      // create a combine and project operator that will perform the
      // marginalization
      MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
                                                                                   _projection_op_);
      xnew_pot_list = combine_and_project.execute(xpot_list, del_vars);
    }

    // combine all the remaining tensors in order to create only one resulting tensor
    const Tensor< GUM_SCALAR >* xres_pot;
    ScheduleMultiDim< Tensor< GUM_SCALAR > >* res_pot;
    if (xnew_pot_list.size() == 1) {
      xres_pot = *(xnew_pot_list.begin());
    } else if (xnew_pot_list.size() > 1) {
      // combine all the tensors that resulted from the above combine and
      // projet execution
      MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
      xres_pot = fast_combination.execute(xnew_pot_list);
      // the intermediate tensors produced by combine_and_project are no
      // longer needed once combined, except the final result itself and
      // the tensors that belonged to the input list
      for (const auto pot: xnew_pot_list) {
        if (!xpot_list.contains(pot) && (pot != xres_pot)) delete pot;
      }
    } else {
      // everything was summed out: the result is an empty tensor
      xres_pot = new Tensor< GUM_SCALAR >();
    }

    // transform xres_pot into a ScheduleMultiDim
    if (xpot_list.contains(xres_pot))
      // xres_pot is one of the original input tensors: wrap it without
      // copying (the "false" argument presumably disables the copy —
      // the original tensor must therefore outlive res_pot)
      res_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*xres_pot, false);
    else {
      // xres_pot was allocated above: move its content into the
      // ScheduleMultiDim and dispose of the now-empty shell
      res_pot = new ScheduleMultiDim< Tensor< GUM_SCALAR > >(
          std::move(const_cast< Tensor< GUM_SCALAR >& >(*xres_pot)));
      delete xres_pot;
    }

    // remove all the tensors that were created due to projections of
    // barren nodes and that are not part of the new_pot_list: these
    // tensors were just temporary tensors
    for (const auto barren_pot: barren_projected_tensors) {
      if (!xnew_pot_list.exists(barren_pot)) delete barren_pot;
    }

    return res_pot;
  }
1875
  // creates the message sent by clique from_id to clique to_id
  //
  // The message is the combination of the tensor assigned to clique from_id
  // with all the messages received by from_id except the one incoming from
  // to_id, marginalized so that only the variables of the separator between
  // from_id and to_id remain. The operations are only recorded into the
  // schedule. The resulting ScheduleMultiDim is stored into
  // _separator_tensors_ and the message is marked as computed.
  template < typename GUM_SCALAR >
  void ShaferShenoyInference< GUM_SCALAR >::_produceMessage_(Schedule& schedule,
                                                             NodeId from_id,
                                                             NodeId to_id) {
    // get the tensors of the clique.
    _ScheduleMultiDimSet_ pot_list;
    if (_clique_ss_tensor_[from_id] != nullptr) pot_list.insert(_clique_ss_tensor_[from_id]);

    // add the messages sent by adjacent nodes to from_id.
    for (const auto other_id: _JT_->neighbours(from_id)) {
      if (other_id != to_id) {
        const auto separator_pot = _separator_tensors_[Arc(other_id, from_id)];
        if (separator_pot != nullptr) pot_list.insert(separator_pot);
      }
    }

    // get the set of variables that need be removed from the tensors:
    // the separator variables are kept, the other clique variables are
    // summed out
    const NodeSet& from_clique = _JT_->clique(from_id);
    const NodeSet& separator = _JT_->separator(from_id, to_id);
    gum::VariableSet del_vars(from_clique.size());
    gum::VariableSet kept_vars(separator.size());
    const auto& bn = this->BN();

    for (const auto node: from_clique) {
      if (!separator.contains(node)) {
        del_vars.insert(&(bn.variable(node)));
      } else {
        kept_vars.insert(&(bn.variable(node)));
      }
    }

    // pot_list now contains all the tensors to multiply and marginalize
    // => combine the messages
    const IScheduleMultiDim* new_pot = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);

    // keep track of the newly created tensor
    const Arc arc(from_id, to_id);
    if (!pot_list.exists(new_pot)) {
      if (!_arc_to_created_tensors_.exists(arc)) {
        _arc_to_created_tensors_.insert(arc, new_pot);

        // do not forget to make the ScheduleMultiDim persistent
        // (presumably so that the message survives the execution and
        // destruction of the schedule — TODO confirm against Schedule docs)
        auto op = schedule.scheduleMultiDimCreator(new_pot);
        if (op != nullptr) const_cast< ScheduleOperator* >(op)->makeResultsPersistent(true);
      }
    }

    _separator_tensors_[arc] = new_pot;
    _messages_computed_[arc] = true;
  }
1927
1928 // creates the message sent by clique from_id to clique to_id
1929 template < typename GUM_SCALAR >
1930 void ShaferShenoyInference< GUM_SCALAR >::_produceMessage_(NodeId from_id, NodeId to_id) {
1931 // get the tensors of the clique.
1932 _ScheduleMultiDimSet_ pot_list;
1933 if (_clique_ss_tensor_[from_id] != nullptr) pot_list.insert(_clique_ss_tensor_[from_id]);
1934
1935 // add the messages sent by adjacent nodes to from_id.
1936 for (const auto other_id: _JT_->neighbours(from_id)) {
1937 if (other_id != to_id) {
1938 const auto separator_pot = _separator_tensors_[Arc(other_id, from_id)];
1939 if (separator_pot != nullptr) pot_list.insert(separator_pot);
1940 }
1941 }
1942
1943 // get the set of variables that need be removed from the tensors
1944 const NodeSet& from_clique = _JT_->clique(from_id);
1945 const NodeSet& separator = _JT_->separator(from_id, to_id);
1946 gum::VariableSet del_vars(from_clique.size());
1947 gum::VariableSet kept_vars(separator.size());
1948 const auto& bn = this->BN();
1949
1950 for (const auto node: from_clique) {
1951 if (!separator.contains(node)) {
1952 del_vars.insert(&(bn.variable(node)));
1953 } else {
1954 kept_vars.insert(&(bn.variable(node)));
1955 }
1956 }
1957
1958 // pot_list now contains all the tensors to multiply and marginalize
1959 // => combine the messages
1960 const IScheduleMultiDim* new_pot = _marginalizeOut_(pot_list, del_vars, kept_vars);
1961
1962 // keep track of the newly created tensor
1963 const Arc arc(from_id, to_id);
1964 if (!pot_list.exists(new_pot)) {
1965 if (!_arc_to_created_tensors_.exists(arc)) { _arc_to_created_tensors_.insert(arc, new_pot); }
1966 }
1967
1968 _separator_tensors_[arc] = new_pot;
1969 _messages_computed_[arc] = true;
1970 }
1971
1972 // performs a whole inference
1973 template < typename GUM_SCALAR >
1974 INLINE void ShaferShenoyInference< GUM_SCALAR >::makeInference_() {
1975 if (_use_schedules_) {
1976 Schedule schedule;
1977
1978 // collect messages for all single targets
1979 for (const auto node: this->targets()) {
1980 // perform only collects in the join tree for nodes that have
1981 // not received hard evidence (those that received hard evidence were
1982 // not included into the join tree for speed-up reasons)
1983 if (_graph_.exists(node)) {
1984 _collectMessage_(schedule, _node_to_clique_[node], _node_to_clique_[node]);
1985 }
1986 }
1987
1988 // collect messages for all set targets
1989 // by parsing _joint_target_to_clique_, we ensure that the cliques that
1990 // are referenced belong to the join tree (even if some of the nodes in
1991 // their associated joint_target do not belong to _graph_)
1992 for (const auto& set: _joint_target_to_clique_)
1993 _collectMessage_(schedule, set.second, set.second);
1994
1995 // really perform the computations
1996 this->scheduler().execute(schedule);
1997 } else {
1998 // collect messages for all single targets
1999 for (const auto node: this->targets()) {
2000 // perform only collects in the join tree for nodes that have
2001 // not received hard evidence (those that received hard evidence were
2002 // not included into the join tree for speed-up reasons)
2003 if (_graph_.exists(node)) {
2004 _collectMessage_(_node_to_clique_[node], _node_to_clique_[node]);
2005 }
2006 }
2007
2008 // collect messages for all set targets
2009 // by parsing _joint_target_to_clique_, we ensure that the cliques that
2010 // are referenced belong to the join tree (even if some of the nodes in
2011 // their associated joint_target do not belong to _graph_)
2012 for (const auto& set: _joint_target_to_clique_)
2013 _collectMessage_(set.second, set.second);
2014 }
2015 }
2016
2018 template < typename GUM_SCALAR >
2019 Tensor< GUM_SCALAR >*
2020 ShaferShenoyInference< GUM_SCALAR >::unnormalizedJointPosterior_(NodeId id) {
2021 if (_use_schedules_) {
2022 Schedule schedule;
2023 return _unnormalizedJointPosterior_(schedule, id);
2024 } else {
2025 return _unnormalizedJointPosterior_(id);
2026 }
2027 }
2028
2030 template < typename GUM_SCALAR >
2031 Tensor< GUM_SCALAR >*
2032 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule,
2033 NodeId id) {
2034 const auto& bn = this->BN();
2035
2036 // hard evidence do not belong to the join tree
2037 // # TODO: check for sets of inconsistent hard evidence
2038 if (this->hardEvidenceNodes().contains(id)) {
2039 return new Tensor< GUM_SCALAR >(*(this->evidence()[id]));
2040 }
2041
2042 auto& scheduler = this->scheduler();
2043
2044 // if we still need to perform some inference task, do it (this should
2045 // already have been done by makeInference_)
2046 const NodeId clique_of_id = _node_to_clique_[id];
2047 _collectMessage_(schedule, clique_of_id, clique_of_id);
2048
2049 // now we just need to create the product of the tensors of the clique
2050 // containing id with the messages received by this clique and
2051 // marginalize out all variables except id
2052 _ScheduleMultiDimSet_ pot_list;
2053 if (_clique_ss_tensor_[clique_of_id] != nullptr)
2054 pot_list.insert(_clique_ss_tensor_[clique_of_id]);
2055
2056 // add the messages sent by adjacent nodes to targetClique
2057 for (const auto other: _JT_->neighbours(clique_of_id))
2058 pot_list.insert(_separator_tensors_[Arc(other, clique_of_id)]);
2059
2060 // get the set of variables that need be removed from the tensors
2061 const NodeSet& nodes = _JT_->clique(clique_of_id);
2062 gum::VariableSet kept_vars{&(bn.variable(id))};
2063 gum::VariableSet del_vars(nodes.size());
2064 for (const auto node: nodes) {
2065 if (node != id) del_vars.insert(&(bn.variable(node)));
2066 }
2067
2068 // pot_list now contains all the tensors to multiply and marginalize
2069 // => combine the messages
2070 auto resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2071 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2072 _marginalizeOut_(schedule, pot_list, del_vars, kept_vars)));
2073 Tensor< GUM_SCALAR >* joint = nullptr;
2074
2075 scheduler.execute(schedule);
2076
2077 // if pot already existed, create a copy, so that we can put it into
2078 // the _target_posteriors_ property
2079 if (pot_list.exists(resulting_pot)) {
2080 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2081 } else {
2082 joint = resulting_pot->exportMultiDim();
2083 }
2084
2085 // check that the joint posterior is different from a 0 vector: this would
2086 // indicate that some hard evidence are not compatible (their joint
2087 // probability is equal to 0)
2088 bool nonzero_found = false;
2089 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2090 if (joint->get(inst)) {
2091 nonzero_found = true;
2092 break;
2093 }
2094 }
2095 if (!nonzero_found) {
2096 // remove joint from memory to avoid memory leaks
2097 delete joint;
2099 "some evidence entered into the Bayes "
2100 "net are incompatible (their joint proba = 0)");
2101 }
2102 return joint;
2103 }
2104
2106 template < typename GUM_SCALAR >
2107 Tensor< GUM_SCALAR >*
2108 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(NodeId id) {
2109 const auto& bn = this->BN();
2110
2111 // hard evidence do not belong to the join tree
2112 // # TODO: check for sets of inconsistent hard evidence
2113 if (this->hardEvidenceNodes().contains(id)) {
2114 return new Tensor< GUM_SCALAR >(*(this->evidence()[id]));
2115 }
2116
2117 // if we still need to perform some inference task, do it (this should
2118 // already have been done by makeInference_)
2119 NodeId clique_of_id = _node_to_clique_[id];
2120 _collectMessage_(clique_of_id, clique_of_id);
2121
2122 // now we just need to create the product of the tensors of the clique
2123 // containing id with the messages received by this clique and
2124 // marginalize out all variables except id
2125 _ScheduleMultiDimSet_ pot_list;
2126 if (_clique_ss_tensor_[clique_of_id] != nullptr)
2127 pot_list.insert(_clique_ss_tensor_[clique_of_id]);
2128
2129 // add the messages sent by adjacent nodes to targetClique
2130 for (const auto other: _JT_->neighbours(clique_of_id))
2131 pot_list.insert(_separator_tensors_[Arc(other, clique_of_id)]);
2132
2133 // get the set of variables that need be removed from the tensors
2134 const NodeSet& nodes = _JT_->clique(clique_of_id);
2135 gum::VariableSet kept_vars{&(bn.variable(id))};
2136 gum::VariableSet del_vars(nodes.size());
2137 for (const auto node: nodes) {
2138 if (node != id) del_vars.insert(&(bn.variable(node)));
2139 }
2140
2141 // pot_list now contains all the tensors to multiply and marginalize
2142 // => combine the messages
2143 auto resulting_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2144 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2145 _marginalizeOut_(pot_list, del_vars, kept_vars)));
2146 Tensor< GUM_SCALAR >* joint = nullptr;
2147
2148 // if pot already existed, create a copy, so that we can put it into
2149 // the _target_posteriors_ property
2150 if (pot_list.exists(resulting_pot)) {
2151 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2152 } else {
2153 joint = resulting_pot->exportMultiDim();
2154 delete resulting_pot;
2155 }
2156
2157 // check that the joint posterior is different from a 0 vector: this would
2158 // indicate that some hard evidence are not compatible (their joint
2159 // probability is equal to 0)
2160 bool nonzero_found = false;
2161 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2162 if (joint->get(inst)) {
2163 nonzero_found = true;
2164 break;
2165 }
2166 }
2167 if (!nonzero_found) {
2168 // remove joint from memory to avoid memory leaks
2169 delete joint;
2171 "some evidence entered into the Bayes "
2172 "net are incompatible (their joint proba = 0)");
2173 }
2174 return joint;
2175 }
2176
2178 template < typename GUM_SCALAR >
2179 const Tensor< GUM_SCALAR >& ShaferShenoyInference< GUM_SCALAR >::posterior_(NodeId id) {
2180 // check if we have already computed the posterior
2181 if (_target_posteriors_.exists(id)) { return *(_target_posteriors_[id]); }
2182
2183 // compute the joint posterior and normalize
2184 auto joint = unnormalizedJointPosterior_(id);
2185 if (joint->sum() != 1) // hard test for ReadOnly CPT (as aggregator)
2186 joint->normalize();
2187 _target_posteriors_.insert(id, joint);
2188
2189 return *joint;
2190 }
2191
2193 template < typename GUM_SCALAR >
2194 Tensor< GUM_SCALAR >*
2195 ShaferShenoyInference< GUM_SCALAR >::unnormalizedJointPosterior_(const NodeSet& set) {
2196 if (_use_schedules_) {
2197 Schedule schedule;
2198 return _unnormalizedJointPosterior_(schedule, set);
2199 } else {
2200 return _unnormalizedJointPosterior_(set);
2201 }
2202 }
2203
2205 template < typename GUM_SCALAR >
2206 Tensor< GUM_SCALAR >*
2207 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule,
2208 const NodeSet& set) {
2209 // hard evidence do not belong to the join tree, so extract the nodes
2210 // from targets that are not hard evidence
2211 NodeSet targets = set, hard_ev_nodes;
2212 for (const auto node: this->hardEvidenceNodes()) {
2213 if (targets.contains(node)) {
2214 targets.erase(node);
2215 hard_ev_nodes.insert(node);
2216 }
2217 }
2218
2219 auto& scheduler = this->scheduler();
2220
2221 // if all the nodes have received hard evidence, then compute the
2222 // joint posterior directly by multiplying the hard evidence tensors
2223 const auto& evidence = this->evidence();
2224 if (targets.empty()) {
2225 if (set.size() == 1) {
2226 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2227 } else {
2228 _ScheduleMultiDimSet_ pot_list;
2229 for (const auto node: set) {
2230 auto new_pot_ev = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[node], false);
2231 pot_list.insert(new_pot_ev);
2232 }
2233
2234 // combine all the tensors of the nodes in set
2235 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2236 const IScheduleMultiDim* pot = fast_combination.schedule(schedule, pot_list);
2237 auto schedule_pot = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2238 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(pot));
2239 scheduler.execute(schedule);
2240 auto result = schedule_pot->exportMultiDim();
2241
2242 return result;
2243 }
2244 }
2245
2246
2247 // if we still need to perform some inference task, do it: so, first,
2248 // determine the clique on which we should perform collect to compute
2249 // the unnormalized joint posterior of a set of nodes containing "targets"
2250 NodeId clique_of_set;
2251 try {
2252 clique_of_set = _joint_target_to_clique_[set];
2253 } catch (NotFound const&) {
2254 // here, the precise set of targets does not belong to the set of targets
2255 // defined by the user. So we will try to find a clique in the junction
2256 // tree that contains "targets":
2257
2258 // 1/ we should check that all the nodes belong to the join tree
2259 for (const auto node: targets) {
2260 if (!_graph_.exists(node)) {
2262 "The variable " << this->BN().variable(node).name() << "(" << node
2263 << ") does not belong to this optimized inference.")
2264 }
2265 }
2266
2267 // 2/ the clique created by the first eliminated node among target is the
2268 // one we are looking for
2269 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2270
2271 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2272 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2273 elim_order.insert(JT_elim_order[i], (int)i);
2274 NodeId first_eliminated_node = *(targets.begin());
2275 int elim_number = elim_order[first_eliminated_node];
2276 for (const auto node: targets) {
2277 if (elim_order[node] < elim_number) {
2278 elim_number = elim_order[node];
2279 first_eliminated_node = node;
2280 }
2281 }
2282
2283 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2284
2285
2286 // 3/ check that clique_of_set contains the all the nodes in the target
2287 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2288 for (const auto node: targets) {
2289 if (!clique_nodes.contains(node)) {
2291 this->BN().names(set) << "(" << set << ")"
2292 << " is not addressable in this optimized inference.")
2293 }
2294 }
2295
2296 // add the discovered clique to _joint_target_to_clique_
2297 _joint_target_to_clique_.insert(set, clique_of_set);
2298 }
2299
2300 // now perform a collect on the clique
2301 _collectMessage_(schedule, clique_of_set, clique_of_set);
2302
2303 // now we just need to create the product of the tensors of the clique
2304 // containing set with the messages received by this clique and
2305 // marginalize out all variables except set
2306 _ScheduleMultiDimSet_ pot_list;
2307 if (_clique_ss_tensor_[clique_of_set] != nullptr) {
2308 auto pot = _clique_ss_tensor_[clique_of_set];
2309 if (!schedule.existsScheduleMultiDim(pot->id())) schedule.emplaceScheduleMultiDim(*pot);
2310 pot_list.insert(_clique_ss_tensor_[clique_of_set]);
2311 }
2312
2313 // add the messages sent by adjacent nodes to targetClique
2314 for (const auto other: _JT_->neighbours(clique_of_set)) {
2315 const auto pot = _separator_tensors_[Arc(other, clique_of_set)];
2316 if (pot != nullptr) pot_list.insert(pot);
2317 }
2318
2319
2320 // get the set of variables that need be removed from the tensors
2321 const NodeSet& nodes = _JT_->clique(clique_of_set);
2322 gum::VariableSet del_vars(nodes.size());
2323 gum::VariableSet kept_vars(targets.size());
2324 const auto& bn = this->BN();
2325 for (const auto node: nodes) {
2326 if (!targets.contains(node)) {
2327 del_vars.insert(&(bn.variable(node)));
2328 } else {
2329 kept_vars.insert(&(bn.variable(node)));
2330 }
2331 }
2332
2333 // pot_list now contains all the tensors to multiply and marginalize
2334 // => combine the messages
2335 const IScheduleMultiDim* new_pot = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
2336 scheduler.execute(schedule);
2337 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot
2338 = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2339 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(new_pot));
2340
2341 // if pot already existed, create a copy, so that we can put it into
2342 // the _target_posteriors_ property
2343 Tensor< GUM_SCALAR >* joint = nullptr;
2344 if (pot_list.exists(resulting_pot)) {
2345 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2346 } else {
2347 joint = resulting_pot->exportMultiDim();
2348 }
2349
2350 // check that the joint posterior is different from a 0 vector: this would
2351 // indicate that some hard evidence are not compatible
2352 bool nonzero_found = false;
2353 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2354 if ((*joint)[inst]) {
2355 nonzero_found = true;
2356 break;
2357 }
2358 }
2359 if (!nonzero_found) {
2360 // remove joint from memory to avoid memory leaks
2361 delete joint;
2363 "some evidence entered into the Bayes "
2364 "net are incompatible (their joint proba = 0)");
2365 }
2366
2367 // if there exists some nodes in the set of targets that received hard evidence,
2368 // then the joint Tensor needs be multiplied by the evidence tensors of
2369 // these nodes
2370 if (!hard_ev_nodes.empty()) {
2371 _TensorSet_ pot_list;
2372 pot_list.insert(joint);
2373 const auto& hard_evidence = this->evidence();
2374 for (const auto node: hard_ev_nodes)
2375 pot_list.insert(hard_evidence[node]);
2376 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > combine(_combination_op_);
2377 Tensor< GUM_SCALAR >* new_joint = combine.execute(pot_list);
2378 delete joint;
2379 joint = new_joint;
2380 }
2381
2382 return joint;
2383 }
2384
2386 template < typename GUM_SCALAR >
2387 Tensor< GUM_SCALAR >*
2388 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(const NodeSet& set) {
2389 // hard evidence do not belong to the join tree, so extract the nodes
2390 // from targets that are not hard evidence
2391 NodeSet targets = set, hard_ev_nodes;
2392 for (const auto node: this->hardEvidenceNodes()) {
2393 if (targets.contains(node)) {
2394 targets.erase(node);
2395 hard_ev_nodes.insert(node);
2396 }
2397 }
2398
2399 // if all the nodes have received hard evidence, then compute the
2400 // joint posterior directly by multiplying the hard evidence tensors
2401 const auto& evidence = this->evidence();
2402 if (targets.empty()) {
2403 if (set.size() == 1) {
2404 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2405 } else {
2406 _TensorSet_ pot_list;
2407 for (const auto node: set) {
2408 pot_list.insert(evidence[node]);
2409 }
2410
2411 // combine all the tensors of the nodes in set
2412 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2413 const Tensor< GUM_SCALAR >* pot = fast_combination.execute(pot_list);
2414
2415 return const_cast< Tensor< GUM_SCALAR >* >(pot);
2416 }
2417 }
2418
2419
2420 // if we still need to perform some inference task, do it: so, first,
2421 // determine the clique on which we should perform collect to compute
2422 // the unnormalized joint posterior of a set of nodes containing "targets"
2423 NodeId clique_of_set;
2424 try {
2425 clique_of_set = _joint_target_to_clique_[set];
2426 } catch (NotFound const&) {
2427 // here, the precise set of targets does not belong to the set of targets
2428 // defined by the user. So we will try to find a clique in the junction
2429 // tree that contains "targets":
2430
2431 // 1/ we should check that all the nodes belong to the join tree
2432 for (const auto node: targets) {
2433 if (!_graph_.exists(node)) {
2435 node << " cannot be a query in the optimized inference (w.r.t the declared "
2436 "targets/evidence)")
2437 }
2438 }
2439
2440 // 2/ the clique created by the first eliminated node among target is the
2441 // one we are looking for
2442 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2443
2444 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2445 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2446 elim_order.insert(JT_elim_order[i], (int)i);
2447 NodeId first_eliminated_node = *(targets.begin());
2448 int elim_number = elim_order[first_eliminated_node];
2449 for (const auto node: targets) {
2450 if (elim_order[node] < elim_number) {
2451 elim_number = elim_order[node];
2452 first_eliminated_node = node;
2453 }
2454 }
2455
2456 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2457
2458 // 3/ check that clique_of_set contains the all the nodes in the target
2459 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2460 for (const auto node: targets) {
2461 if (!clique_nodes.contains(node)) {
2463 this->BN().names(set) << "(" << set << ")"
2464 << " is not addressable in this optimized inference.")
2465 }
2466 }
2467
2468 // add the discovered clique to _joint_target_to_clique_
2469 _joint_target_to_clique_.insert(set, clique_of_set);
2470 }
2471
2472 // now perform a collect on the clique
2473 _collectMessage_(clique_of_set, clique_of_set);
2474
2475 // now we just need to create the product of the tensors of the clique
2476 // containing set with the messages received by this clique and
2477 // marginalize out all variables except set
2478 _ScheduleMultiDimSet_ pot_list;
2479 if (_clique_ss_tensor_[clique_of_set] != nullptr) {
2480 auto pot = _clique_ss_tensor_[clique_of_set];
2481 if (pot != nullptr) pot_list.insert(_clique_ss_tensor_[clique_of_set]);
2482 }
2483
2484 // add the messages sent by adjacent nodes to targetClique
2485 for (const auto other: _JT_->neighbours(clique_of_set)) {
2486 const auto pot = _separator_tensors_[Arc(other, clique_of_set)];
2487 if (pot != nullptr) pot_list.insert(pot);
2488 }
2489
2490 // get the set of variables that need be removed from the tensors
2491 const NodeSet& nodes = _JT_->clique(clique_of_set);
2492 gum::VariableSet del_vars(nodes.size());
2493 gum::VariableSet kept_vars(targets.size());
2494 const auto& bn = this->BN();
2495 for (const auto node: nodes) {
2496 if (!targets.contains(node)) {
2497 del_vars.insert(&(bn.variable(node)));
2498 } else {
2499 kept_vars.insert(&(bn.variable(node)));
2500 }
2501 }
2502
2503 // pot_list now contains all the tensors to multiply and marginalize
2504 // => combine the messages
2505 const IScheduleMultiDim* new_pot = _marginalizeOut_(pot_list, del_vars, kept_vars);
2506 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot
2507 = const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(
2508 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR > >* >(new_pot));
2509
2510 // if pot already existed, create a copy, so that we can put it into
2511 // the _target_posteriors_ property
2512 Tensor< GUM_SCALAR >* joint = nullptr;
2513 if (pot_list.exists(resulting_pot)) {
2514 joint = new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2515 } else {
2516 joint = resulting_pot->exportMultiDim();
2517 delete new_pot;
2518 }
2519
2520 // check that the joint posterior is different from a 0 vector: this would
2521 // indicate that some hard evidence are not compatible
2522 bool nonzero_found = false;
2523 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2524 if ((*joint)[inst]) {
2525 nonzero_found = true;
2526 break;
2527 }
2528 }
2529 if (!nonzero_found) {
2530 // remove joint from memory to avoid memory leaks
2531 delete joint;
2533 "some evidence entered into the Bayes "
2534 "net are incompatible (their joint proba = 0)");
2535 }
2536
2537 // if there exists some nodes in the set of targets that received hard evidence,
2538 // then the joint Tensor needs be multiplied by the evidence tensors of
2539 // these nodes
2540 if (!hard_ev_nodes.empty()) {
2541 _TensorSet_ pot_list;
2542 pot_list.insert(joint);
2543 const auto& hard_evidence = this->evidence();
2544 for (const auto node: hard_ev_nodes)
2545 pot_list.insert(hard_evidence[node]);
2546 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > combine(_combination_op_);
2547 Tensor< GUM_SCALAR >* new_joint = combine.execute(pot_list);
2548 delete joint;
2549 joint = new_joint;
2550 }
2551
2552 return joint;
2553 }
2554
2556 template < typename GUM_SCALAR >
2557 const Tensor< GUM_SCALAR >&
2558 ShaferShenoyInference< GUM_SCALAR >::jointPosterior_(const NodeSet& set) {
2559 // check if we have already computed the posterior
2560 if (_joint_target_posteriors_.exists(set)) { return *(_joint_target_posteriors_[set]); }
2561
2562 // compute the joint posterior and normalize
2563 auto joint = unnormalizedJointPosterior_(set);
2564 joint->normalize();
2565 _joint_target_posteriors_.insert(set, joint);
2566
2567 return *joint;
2568 }
2569
2571 template < typename GUM_SCALAR >
2572 const Tensor< GUM_SCALAR >&
2573 ShaferShenoyInference< GUM_SCALAR >::jointPosterior_(const NodeSet& wanted_target,
2574 const NodeSet& declared_target) {
2575 // check if we have already computed the posterior of wanted_target
2576 if (_joint_target_posteriors_.exists(wanted_target))
2577 return *(_joint_target_posteriors_[wanted_target]);
2578
2579 // here, we will have to compute the posterior of declared_target and
2580 // marginalize out all the variables that do not belong to wanted_target
2581
2582 // check if we have already computed the posterior of declared_target
2583 if (!_joint_target_posteriors_.exists(declared_target)) { jointPosterior_(declared_target); }
2584
2585 // marginalize out all the variables that do not belong to wanted_target
2586 const auto& bn = this->BN();
2587 gum::VariableSet del_vars;
2588 for (const auto node: declared_target)
2589 if (!wanted_target.contains(node)) del_vars.insert(&(bn.variable(node)));
2590 auto pot
2591 = new Tensor< GUM_SCALAR >(_joint_target_posteriors_[declared_target]->sumOut(del_vars));
2592
2593 // save the result into the cache
2594 _joint_target_posteriors_.insert(wanted_target, pot);
2595
2596 return *pot;
2597 }
2598
2599 template < typename GUM_SCALAR >
2600 GUM_SCALAR ShaferShenoyInference< GUM_SCALAR >::evidenceProbability() {
2601 // here, we should check that _find_relevant_tensor_type_ is equal to
2602 // FIND_ALL. Otherwise, the computations could be wrong.
2603 RelevantTensorsFinderType old_relevant_type = _find_relevant_tensor_type_;
2604
2605 // if the relevant tensors finder is not equal to FIND_ALL, all the
2606 // current computations may lead to incorrect results, so we shall
2607 // discard them
2608 if (old_relevant_type != RelevantTensorsFinderType::FIND_ALL) {
2609 _find_relevant_tensor_type_ = RelevantTensorsFinderType::FIND_ALL;
2610 _is_new_jt_needed_ = true;
2611 this->setOutdatedStructureState_();
2612 }
2613
2614 // perform inference in each connected component
2615 this->makeInference();
2616
2617 // for each connected component, select a variable X and compute the
2618 // joint probability of X and evidence e. Then marginalize-out X to get
2619 // p(e) in this connected component. Finally, multiply all the p(e) that
2620 // we got and the elements in _constants_. The result is the probability
2621 // of evidence
2622
2623 GUM_SCALAR prob_ev = 1;
2624 for (const auto root: _roots_) {
2625 // get a node in the clique
2626 NodeId node = *(_JT_->clique(root).begin());
2627 Tensor< GUM_SCALAR >* tmp = unnormalizedJointPosterior_(node);
2628 prob_ev *= tmp->sum();
2629 delete tmp;
2630 }
2631
2632 for (const auto& projected_cpt: _constants_)
2633 prob_ev *= projected_cpt.second;
2634
2635 // put back the relevant tensor type selected by the user
2636 _find_relevant_tensor_type_ = old_relevant_type;
2637
2638 return prob_ev;
2639 }
2640
2641} /* namespace gum */
2642
2643#endif // DOXYGEN_SHOULD_SKIP_THIS
The BayesBall algorithm (as described by Schachter).
Detect barren nodes for inference in Bayesian networks.
An algorithm for converting a join tree into a binary join tree.
Exception : a similar element already exists.
<agrum/BN/inference/evidenceInference.h>
Exception : fatal (unknown ?) error.
Class representing the minimal interface for Bayesian network with no numerical data.
Definition IBayesNet.h:75
Exception : several evidence are incompatible together (proba=0).
Exception: at least one argument passed to a function is not what was expected.
<agrum/BN/inference/jointTargetedInference.h>
Exception : the element we looked for cannot be found.
Size size() const noexcept
Returns the number of elements in the set.
Definition set_tpl.h:636
iterator_safe beginSafe() const
The usual safe begin iterator to parse the set.
Definition set_tpl.h:414
const iterator_safe & endSafe() const noexcept
The usual safe end iterator to parse the set.
Definition set_tpl.h:426
bool exists(const Key &k) const
Indicates whether a given elements belong to the set.
Definition set_tpl.h:533
void insert(const Key &k)
Inserts a new element into the set.
Definition set_tpl.h:539
void erase(const Key &k)
Erases an element from the set.
Definition set_tpl.h:582
ShaferShenoyInference(const IBayesNet< GUM_SCALAR > *BN, RelevantTensorsFinderType=RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS, FindBarrenNodesType barren_type=FindBarrenNodesType::FIND_BARREN_NODES, bool use_binary_join_tree=true)
default constructor
Exception : a looked-for element could not be found.
d-separation analysis (as described in Koller & Friedman 2009)
#define GUM_ERROR(type, msg)
Definition exceptions.h:72
Set< NodeId > NodeSet
Some typedefs and defines for shortcuts ...
Header files of gum::Instantiation.
gum is the global namespace for all aGrUM entities
Definition agrum.h:46
FindBarrenNodesType
type of algorithm to determine barren nodes
Set< const DiscreteVariable * > VariableSet
CliqueGraph JoinTree
a join tree is a clique graph satisfying the running intersection property (but some cliques may be i...
CliqueGraph JunctionTree
a junction tree is a clique graph satisfying the running intersection property and such that no cliqu...
RelevantTensorsFinderType
type of algorithm for determining the relevant tensors for combinations using some d-separation analy...