51#ifndef DOXYGEN_SHOULD_SKIP_THIS
// Constructor: installs the default relevant-tensor finder
// (_findRelevantTensorsWithdSeparation2_), applies the user-requested finder
// and barren-nodes types, and allocates a DefaultTriangulation.
// NOTE(review): this block is garbled by extraction — original line numbers
// are fused into the text and several lines (parameter list, initializer
// list, closing brace) are missing. Code left byte-identical.
64 template <
typename GUM_SCALAR >
69 bool use_binary_join_tree) :
71 _use_binary_join_tree_(use_binary_join_tree) {
74 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
75 setRelevantTensorsFinderType(relevant_type);
76 setFindBarrenNodesType(barren_type);
// allocate the triangulation algorithm used to build the join tree
79 _triangulation_ =
new DefaultTriangulation;
82 GUM_CONSTRUCTOR(ShaferShenoyInference);
// Destructor: releases every heap-allocated structure owned by the engine —
// the tensors created for arcs, the per-clique combined tensors (only when
// they were allocated by the engine, i.e. when the clique held > 1 tensor),
// the cached posteriors, the join tree, the junction tree and the
// triangulation object.
// NOTE(review): extraction artifacts (fused line numbers, elided delete
// statements inside the loops) — code left byte-identical.
86 template <
typename GUM_SCALAR >
87 INLINE ShaferShenoyInference< GUM_SCALAR >::~ShaferShenoyInference() {
89 for (
const auto& pot: _arc_to_created_tensors_)
// per-clique combined tensors: delete only engine-owned ones
97 for (
auto pot: _clique_ss_tensor_) {
98 if (_clique_tensors_[pot.first].size() > 1)
delete pot.second;
101 for (
auto potset: _clique_tensors_) {
102 for (
auto pot: potset.second)
// cached posteriors
107 for (
const auto& pot: _target_posteriors_)
109 for (
const auto& pot: _joint_target_posteriors_)
113 if (_JT_ !=
nullptr)
delete _JT_;
114 if (_junctionTree_ !=
nullptr)
delete _junctionTree_;
115 delete _triangulation_;
118 GUM_DESTRUCTOR(ShaferShenoyInference);
// Replaces the triangulation algorithm: deletes the old one, clones the new
// one via its virtual factory (newFactory()), and flags that the join tree
// must be rebuilt before the next inference.
// NOTE(review): closing brace elided by extraction; code left byte-identical.
122 template <
typename GUM_SCALAR >
123 void ShaferShenoyInference< GUM_SCALAR >::setTriangulation(
124 const Triangulation& new_triangulation) {
125 delete _triangulation_;
126 _triangulation_ = new_triangulation.newFactory();
127 _is_new_jt_needed_ =
true;
128 this->setOutdatedStructureState_();
// Returns the current join tree, lazily rebuilding it first when flagged.
// NOTE(review): the `return _JT_;` line and closing brace appear elided by
// extraction — TODO confirm against the original file; code left byte-identical.
132 template <
typename GUM_SCALAR >
133 INLINE
const JoinTree* ShaferShenoyInference< GUM_SCALAR >::joinTree() {
134 if (_is_new_jt_needed_) _createNewJT_();
// Returns the current junction tree, lazily rebuilding the structures first
// when a new join tree is needed.
// NOTE(review): closing brace elided by extraction; code left byte-identical.
140 template <
typename GUM_SCALAR >
141 INLINE
const JunctionTree* ShaferShenoyInference< GUM_SCALAR >::junctionTree() {
142 if (_is_new_jt_needed_) _createNewJT_();
144 return _junctionTree_;
// Selects the algorithm used to determine which tensors are relevant for a
// message: maps each RelevantTensorsFinderType enumerator to the matching
// member-function pointer, then invalidates all previously computed messages.
// NOTE(review): the `switch (...)`, `break`s and the GUM_ERROR opener for the
// default case are elided by extraction; code left byte-identical.
148 template <
typename GUM_SCALAR >
149 void ShaferShenoyInference< GUM_SCALAR >::setRelevantTensorsFinderType(
150 RelevantTensorsFinderType type) {
151 if (type != _find_relevant_tensor_type_) {
153 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
154 _findRelevantTensors_
155 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_;
158 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
159 _findRelevantTensors_
160 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_;
163 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
164 _findRelevantTensors_
165 = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_;
168 case RelevantTensorsFinderType::FIND_ALL :
169 _findRelevantTensors_ = &ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsGetAll_;
// default case: unsupported enumerator -> raise an error
174 "setRelevantTensorsFinderType for type " << (
unsigned int)type
175 <<
" is not implemented yet");
178 _find_relevant_tensor_type_ = type;
// changing the finder invalidates every message computed so far
182 _invalidateAllMessages_();
// Installs the projection (marginalization) operator used when summing
// variables out of tensors, then invalidates all computed messages since
// their values depended on the previous operator.
187 template <
typename GUM_SCALAR >
188 INLINE
void ShaferShenoyInference< GUM_SCALAR >::_setProjectionFunction_(
189 Tensor< GUM_SCALAR > (*proj)(
const Tensor< GUM_SCALAR >&,
const gum::VariableSet&)) {
190 _projection_op_ = proj;
194 _invalidateAllMessages_();
// Installs the combination (product) operator used to multiply tensors,
// then invalidates all computed messages since their values depended on the
// previous operator.
198 template <
typename GUM_SCALAR >
199 INLINE
void ShaferShenoyInference< GUM_SCALAR >::_setCombinationFunction_(
200 Tensor< GUM_SCALAR > (*comb)(
const Tensor< GUM_SCALAR >&,
const Tensor< GUM_SCALAR >&)) {
201 _combination_op_ = comb;
205 _invalidateAllMessages_();
// Invalidates every message of the Shafer-Shenoy propagation: resets all
// separator tensors to nullptr, marks all messages as not computed, deletes
// and clears the tensors created for arcs, and drops all cached posteriors.
// Finally marks the inference state as outdated if it had been completed.
// NOTE(review): the delete statements inside the posterior loops are elided
// by extraction; code left byte-identical.
209 template <
typename GUM_SCALAR >
210 void ShaferShenoyInference< GUM_SCALAR >::_invalidateAllMessages_() {
212 for (
auto& pot: _separator_tensors_)
213 pot.second =
nullptr;
215 for (
auto& mess_computed: _messages_computed_)
216 mess_computed.second =
false;
// free tensors that were created for arcs during propagation
219 for (
const auto& pot: _arc_to_created_tensors_)
220 if (pot.second !=
nullptr)
delete pot.second;
221 _arc_to_created_tensors_.clear();
// drop cached marginal and joint posteriors
224 for (
const auto& pot: _target_posteriors_)
226 _target_posteriors_.clear();
227 for (
const auto& pot: _joint_target_posteriors_)
229 _joint_target_posteriors_.clear();
232 if (this->isInferenceReady() || this->isInferenceDone()) this->setOutdatedTensorsState_();
// Selects how barren (inference-irrelevant) nodes are handled; only the two
// supported enumerators are accepted, anything else raises an error. A change
// invalidates the current structure (join tree must be recomputed).
// NOTE(review): the `switch (...)` header and GUM_ERROR opener are elided by
// extraction; code left byte-identical.
236 template <
typename GUM_SCALAR >
237 void ShaferShenoyInference< GUM_SCALAR >::setFindBarrenNodesType(FindBarrenNodesType type) {
238 if (type != _barren_nodes_type_) {
242 case FindBarrenNodesType::FIND_BARREN_NODES :
243 case FindBarrenNodesType::FIND_NO_BARREN_NODES :
break;
247 "setFindBarrenNodesType for type " << (
unsigned int)type
248 <<
" is not implemented yet");
251 _barren_nodes_type_ = type;
254 this->setOutdatedStructureState_();
// Hook fired when evidence is added on node `id`: hard evidence (or evidence
// on a node absent from the reduced graph) forces a join-tree rebuild; the
// change is recorded in _evidence_changes_ as ADDED, or downgraded to
// MODIFIED (presumably when an entry already existed — elided branch, TODO
// confirm against the original file).
259 template <
typename GUM_SCALAR >
260 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onEvidenceAdded_(
const NodeId
id,
261 bool isHardEvidence) {
265 if (isHardEvidence || !_graph_.exists(
id)) _is_new_jt_needed_ =
true;
268 _evidence_changes_.insert(
id, EvidenceChangeType::EVIDENCE_ADDED);
274 _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_MODIFIED;
// Hook fired when evidence is removed from node `id`: removing hard evidence
// forces a join-tree rebuild. An ADDED-then-ERASED pair cancels out (the
// entry is simply dropped); otherwise the change is recorded as ERASED.
280 template <
typename GUM_SCALAR >
281 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onEvidenceErased_(
const NodeId
id,
282 bool isHardEvidence) {
285 if (isHardEvidence) _is_new_jt_needed_ =
true;
288 _evidence_changes_.insert(
id, EvidenceChangeType::EVIDENCE_ERASED);
// add followed by erase cancels out
295 if (_evidence_changes_[
id] == EvidenceChangeType::EVIDENCE_ADDED)
296 _evidence_changes_.erase(
id);
297 else _evidence_changes_[id] = EvidenceChangeType::EVIDENCE_ERASED;
// Hook fired when all evidence is removed at once: any hard evidence forces
// a join-tree rebuild; each soft-evidence node is recorded as ERASED, with
// ADDED-then-ERASED pairs cancelling out as in onEvidenceErased_.
303 template <
typename GUM_SCALAR >
304 void ShaferShenoyInference< GUM_SCALAR >::onAllEvidenceErased_(
bool has_hard_evidence) {
305 if (has_hard_evidence || !this->hardEvidenceNodes().empty()) _is_new_jt_needed_ =
true;
307 for (
const auto node: this->softEvidenceNodes()) {
309 _evidence_changes_.insert(node, EvidenceChangeType::EVIDENCE_ERASED);
// add followed by erase cancels out
316 if (_evidence_changes_[node] == EvidenceChangeType::EVIDENCE_ADDED)
317 _evidence_changes_.erase(node);
318 else _evidence_changes_[node] = EvidenceChangeType::EVIDENCE_ERASED;
// Hook fired when the evidence on node `id` changes value: switching between
// soft and hard evidence forces a join-tree rebuild; the change is recorded
// as MODIFIED.
325 template <
typename GUM_SCALAR >
326 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onEvidenceChanged_(
const NodeId
id,
327 bool hasChangedSoftHard) {
328 if (hasChangedSoftHard) _is_new_jt_needed_ =
true;
331 _evidence_changes_.insert(
id, EvidenceChangeType::EVIDENCE_MODIFIED);
341 template <
typename GUM_SCALAR >
342 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onModelChanged_(
const GraphicalModel* bn) {}
// Hook fired when a marginal target is added on node `id`: if the node is
// neither in the reduced graph nor observed by hard evidence, the current
// join tree cannot answer queries on it, so a rebuild is flagged.
// NOTE(review): closing brace elided by extraction; code left byte-identical.
345 template <
typename GUM_SCALAR >
346 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onMarginalTargetAdded_(
const NodeId
id) {
351 if (!_graph_.exists(
id) && !_hard_ev_nodes_.contains(
id)) { _is_new_jt_needed_ =
true; }
355 template <
typename GUM_SCALAR >
356 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onMarginalTargetErased_(
const NodeId
id) {}
// Hook fired when a joint target `set` is added. If no join tree exists yet,
// one is simply flagged as needed. Otherwise the method checks whether the
// existing tree can already answer the joint query: it finds the node of the
// set eliminated first in the triangulation's elimination order and verifies
// that the clique created at that node contains the whole (unobserved part
// of the) set; any miss flags a rebuild.
// NOTE(review): several closing braces and intermediate lines are elided by
// extraction; code left byte-identical.
359 template <
typename GUM_SCALAR >
360 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onJointTargetAdded_(
const NodeSet& set) {
362 if (_JT_ ==
nullptr) {
363 _is_new_jt_needed_ =
true;
// map each node to its rank in the elimination order
370 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
371 int elim_number = std::numeric_limits< int >::max();
372 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
373 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
374 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
375 elim_order.insert(JT_elim_order[i], (
int)i);
376 NodeSet unobserved_set(set.size());
377 for (
const auto node: set) {
378 if (!_graph_.exists(node)) {
379 if (!_hard_ev_nodes_.contains(node)) {
380 _is_new_jt_needed_ =
true;
384 unobserved_set.insert(node);
// track the set's earliest-eliminated node
385 if (elim_order[node] < elim_number) {
386 elim_number = elim_order[node];
387 first_eliminated_node = node;
// the clique created by eliminating that node must contain the whole set
392 if (!unobserved_set.empty()) {
396 const auto clique_id = _node_to_clique_[first_eliminated_node];
397 const auto& clique = _JT_->clique(clique_id);
398 for (
const auto node: unobserved_set) {
399 if (!clique.contains(node)) {
400 _is_new_jt_needed_ =
true;
408 template <
typename GUM_SCALAR >
409 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onJointTargetErased_(
const NodeSet& set) {}
// Hook fired when every node of the BN becomes a marginal target: if any DAG
// node is neither in the reduced graph nor observed by hard evidence, the
// current join tree cannot answer all queries and a rebuild is flagged.
412 template <
typename GUM_SCALAR >
413 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onAllMarginalTargetsAdded_() {
414 for (
const auto node: this->BN().dag()) {
419 if (!_graph_.exists(node) && !_hard_ev_nodes_.contains(node)) {
420 _is_new_jt_needed_ =
true;
427 template <
typename GUM_SCALAR >
428 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onAllMarginalTargetsErased_() {}
431 template <
typename GUM_SCALAR >
432 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onAllJointTargetsErased_() {}
435 template <
typename GUM_SCALAR >
436 INLINE
void ShaferShenoyInference< GUM_SCALAR >::onAllTargetsErased_() {}
// Decides whether the join tree must be rebuilt. Returns true when: no tree
// exists or a rebuild was already flagged; a marginal target is neither in
// the reduced graph nor hard-observed; a joint target's unobserved nodes are
// not all contained in the clique created by its earliest-eliminated node;
// or evidence was ADDED on a node absent from the reduced graph.
// NOTE(review): several lines (declaration of unobserved_set, closing braces,
// final return) are elided by extraction; code left byte-identical.
439 template <
typename GUM_SCALAR >
440 bool ShaferShenoyInference< GUM_SCALAR >::_isNewJTNeeded_()
const {
443 if ((_JT_ ==
nullptr) || _is_new_jt_needed_)
return true;
// every marginal target must be in the graph or hard-observed
451 const auto& hard_ev_nodes = this->hardEvidenceNodes();
452 for (
const auto node: this->targets()) {
453 if (!_graph_.exists(node) && !hard_ev_nodes.exists(node))
return true;
// map each node to its rank in the elimination order
457 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
458 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
459 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
460 elim_order.insert(JT_elim_order[i], (
int)i);
463 for (
const auto& joint_target: this->jointTargets()) {
466 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
467 int elim_number = std::numeric_limits< int >::max();
468 unobserved_set.clear();
469 for (
const auto node: joint_target) {
470 if (!_graph_.exists(node)) {
471 if (!hard_ev_nodes.exists(node))
return true;
473 unobserved_set.insert(node);
474 if (elim_order[node] < elim_number) {
475 elim_number = elim_order[node];
476 first_eliminated_node = node;
// the clique created by the earliest-eliminated node must hold the set
480 if (!unobserved_set.empty()) {
484 const auto clique_id = _node_to_clique_[first_eliminated_node];
485 const auto& clique = _JT_->clique(clique_id);
486 for (
const auto node: unobserved_set) {
487 if (!clique.contains(node))
return true;
// newly added evidence on nodes outside the graph forces a rebuild
494 for (
const auto& change: _evidence_changes_) {
495 if ((change.second == EvidenceChangeType::EVIDENCE_ADDED) && !_graph_.exists(change.first))
// Rebuilds the whole inference structure: reduces the DAG (barren-node and
// d-separation pruning), moralizes it, triangulates, converts to a (binary)
// join tree, maps nodes and joint targets to cliques, resets all per-clique
// and per-separator data structures, decides whether schedules are used
// (based on overall clique size), reinstalls soft evidence, and initializes
// the cliques' tensors.
// NOTE(review): this block is heavily elided by extraction (missing braces,
// elided statements, fused line numbers); code left byte-identical.
504 template <
typename GUM_SCALAR >
505 void ShaferShenoyInference< GUM_SCALAR >::_createNewJT_() {
// start from the full DAG of the BN
521 const auto& bn = this->BN();
523 for (
const auto node: bn.dag())
524 _graph_.addNodeWithId(node);
527 NodeSet target_nodes = this->targets();
528 for (
const auto& nodeset: this->jointTargets()) {
529 target_nodes += nodeset;
// prune barren nodes if requested and not all nodes are targets
537 if ((this->nbrTargets() != bn.dag().size())
538 && (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES)) {
541 if (target_nodes.size() != bn.size()) {
542 BarrenNodesFinder finder(&(bn.dag()));
543 finder.setTargets(&target_nodes);
545 NodeSet evidence_nodes(this->evidence().size());
546 for (
const auto& pair: this->evidence()) {
547 evidence_nodes.insert(pair.first);
549 finder.setEvidence(&evidence_nodes);
551 NodeSet barren_nodes = finder.barrenNodes();
554 for (
const auto node: barren_nodes) {
555 _graph_.eraseNode(node);
// prune d-separated (non-requisite) nodes according to the finder type
563 if (this->nbrTargets() != bn.dag().size()) {
565 bool dsep_analysis =
false;
566 switch (_find_relevant_tensor_type_) {
567 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
568 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES : {
569 BayesBall::requisiteNodes(bn.dag(),
571 this->hardEvidenceNodes(),
572 this->softEvidenceNodes(),
574 dsep_analysis =
true;
577 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 : {
578 dSeparationAlgorithm dsep;
579 dsep.requisiteNodes(bn.dag(),
581 this->hardEvidenceNodes(),
582 this->softEvidenceNodes(),
584 dsep_analysis =
true;
587 case RelevantTensorsFinderType::FIND_ALL :
break;
// safe iteration: erasing while traversing the graph
594 for (
auto iter = _graph_.beginSafe(); iter != _graph_.endSafe(); ++iter) {
595 if (!requisite_nodes.contains(*iter) && !this->hardEvidenceNodes().contains(*iter)) {
596 _graph_.eraseNode(*iter);
// moralization: link each node to its parents and marry the parents
603 for (
const auto node: _graph_) {
604 const NodeSet& parents = bn.parents(node);
605 for (
auto iter1 = parents.cbegin(); iter1 != parents.cend(); ++iter1) {
610 if (_graph_.existsNode(*iter1)) {
611 _graph_.addEdge(*iter1, node);
614 for (++iter2; iter2 != parents.cend(); ++iter2) {
619 if (_graph_.existsNode(*iter2)) _graph_.addEdge(*iter1, *iter2);
// make each joint target a clique by pairwise-linking its nodes
628 for (
const auto& nodeset: this->jointTargets()) {
629 for (
auto iter1 = nodeset.cbegin(); iter1 != nodeset.cend(); ++iter1) {
631 for (++iter2; iter2 != nodeset.cend(); ++iter2) {
632 _graph_.addEdge(*iter1, *iter2);
// hard-observed nodes are removed from the graph to triangulate
638 _hard_ev_nodes_ = this->hardEvidenceNodes();
639 for (
const auto node: _hard_ev_nodes_) {
640 _graph_.eraseNode(node);
// triangulate and (optionally) binarize the junction tree
647 if (_JT_ !=
nullptr)
delete _JT_;
648 if (_junctionTree_ !=
nullptr)
delete _junctionTree_;
650 _triangulation_->setGraph(&_graph_, &(this->domainSizes()));
651 const JunctionTree& triang_jt = _triangulation_->junctionTree();
652 if (_use_binary_join_tree_) {
653 BinaryJoinTreeConverterDefault bjt_converter;
655 _JT_ =
new CliqueGraph(bjt_converter.convert(triang_jt, this->domainSizes(), emptyset));
657 _JT_ =
new CliqueGraph(triang_jt);
659 _junctionTree_ =
new CliqueGraph(triang_jt);
// map each graph node to the clique created when it (or its earliest-
// eliminated parent) was eliminated
664 _node_to_clique_.clear();
665 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
666 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
667 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
668 elim_order.insert(JT_elim_order[i], (
int)i);
669 const DAG& dag = bn.dag();
670 for (
const auto node: _graph_) {
672 NodeId first_eliminated_node = node;
673 int elim_number = elim_order[first_eliminated_node];
675 for (
const auto parent: dag.parents(node)) {
676 if (_graph_.existsNode(parent) && (elim_order[parent] < elim_number)) {
677 elim_number = elim_order[parent];
678 first_eliminated_node = parent;
686 _node_to_clique_.insert(node,
687 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
// hard-observed nodes: attach to the clique of their earliest parent
693 for (
const auto node: _hard_ev_nodes_) {
694 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
695 int elim_number = std::numeric_limits< int >::max();
697 for (
const auto parent: dag.parents(node)) {
698 if (_graph_.exists(parent) && (elim_order[parent] < elim_number)) {
699 elim_number = elim_order[parent];
700 first_eliminated_node = parent;
708 if (elim_number != std::numeric_limits< int >::max()) {
709 _node_to_clique_.insert(node,
710 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
// map each joint target to a clique guaranteed to contain it
715 _joint_target_to_clique_.clear();
716 for (
const auto& set: this->jointTargets()) {
717 NodeId first_eliminated_node = std::numeric_limits< NodeId >::max();
718 int elim_number = std::numeric_limits< int >::max();
722 for (
const auto node: set) {
723 if (!_hard_ev_nodes_.contains(node)) {
726 if (elim_order[node] < elim_number) {
727 elim_number = elim_order[node];
728 first_eliminated_node = node;
733 if (elim_number != std::numeric_limits< int >::max()) {
734 _joint_target_to_clique_.insert(
736 _triangulation_->createdJunctionTreeClique(first_eliminated_node));
741 _computeJoinTreeRoots_();
// clear all per-clique tensors from the previous join tree
746 for (
const auto& pot: _clique_ss_tensor_) {
747 if (_clique_tensors_[pot.first].size() > 1)
delete pot.second;
749 _clique_ss_tensor_.clear();
750 for (
const auto& potlist: _clique_tensors_)
751 for (
const auto pot: potlist.second)
753 _clique_tensors_.clear();
756 for (
const auto& pot: _arc_to_created_tensors_)
758 _arc_to_created_tensors_.clear();
763 _node_to_hard_ev_projected_CPTs_.clear();
766 _node_to_soft_evidence_.clear();
// reset the per-clique tensor containers for the new tree
770 _ScheduleMultiDimSet_ empty_set;
771 for (
const auto node: *_JT_) {
772 _clique_tensors_.insert(node, empty_set);
773 _clique_ss_tensor_.insert(node,
nullptr);
// (re)create the two directed messages per join-tree edge
781 _separator_tensors_.clear();
782 _messages_computed_.clear();
783 for (
const auto& edge: _JT_->edges()) {
784 const Arc arc1(edge.first(), edge.second());
785 _separator_tensors_.insert(arc1,
nullptr);
786 _messages_computed_.insert(arc1,
false);
787 const Arc arc2(edge.second(), edge.first());
788 _separator_tensors_.insert(arc2,
nullptr);
789 _messages_computed_.insert(arc2,
false);
// drop all cached posteriors
793 for (
const auto& pot: _target_posteriors_)
795 _target_posteriors_.clear();
796 for (
const auto& pot: _joint_target_posteriors_)
798 _joint_target_posteriors_.clear();
// use schedules only when the overall clique size exceeds the threshold
803 double overall_size = 0;
804 for (
const auto clique: *_JT_) {
805 double clique_size = 1.0;
806 for (
const auto node: _JT_->clique(clique))
807 clique_size *= this->domainSizes()[node];
808 overall_size += clique_size;
810 _use_schedules_ = (overall_size > _schedule_threshold_);
// put soft evidence tensors into their cliques
813 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
814 for (
const auto node: this->softEvidenceNodes()) {
815 if (_node_to_clique_.exists(node)) {
816 auto ev_pot =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node],
false);
817 _node_to_soft_evidence_.insert(node, ev_pot);
818 _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
// initialize the cliques, with or without schedules
826 if (_use_schedules_) {
828 _initializeJTCliques_(schedule);
830 _initializeJTCliques_();
835 _evidence_changes_.clear();
836 _is_new_jt_needed_ =
false;
// Schedule-free clique initialization: dispatches each relevant CPT into the
// clique of its node. CPTs with no hard-observed variable go in unchanged;
// fully hard-observed CPTs become stored constants; partially observed CPTs
// are combined with the hard-evidence tensors and projected over the
// observed variables before insertion. Finally, each clique's tensors are
// combined into a single Shafer-Shenoy tensor.
// NOTE(review): heavily elided by extraction (missing braces, elided
// declarations such as hard_nodes/hard_variables, GUM_ERROR opener);
// code left byte-identical.
840 template <
typename GUM_SCALAR >
841 void ShaferShenoyInference< GUM_SCALAR >::_initializeJTCliques_() {
842 const auto& bn = this->BN();
843 const DAG& dag = bn.dag();
849 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
850 const NodeProperty< Idx >& hard_evidence = this->hardEvidence();
852 for (
const auto node: dag) {
853 if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
854 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
// classify the CPT's variables: hard-observed vs in the reduced graph
858 const auto& variables = cpt.variablesSequence();
859 bool graph_contains_nodes =
false;
860 for (
const auto var: variables) {
861 NodeId xnode = bn.nodeId(*var);
862 if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
863 else if (_graph_.exists(xnode)) graph_contains_nodes =
true;
// case 1: no hard evidence -> insert the CPT as-is
869 if (hard_nodes.empty()) {
870 auto sched_cpt =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt,
false);
871 _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
// case 2: all variables hard-observed -> the CPT reduces to a constant
877 if (hard_nodes.size() == variables.size()) {
878 Instantiation inst(cpt);
879 for (Size i = 0; i < hard_nodes.size(); ++i) {
880 inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
882 _constants_.insert(node, cpt.get(inst));
887 if (!graph_contains_nodes)
continue;
// case 3: partially observed -> combine with evidence and project out
// the hard-observed variables
891 _TensorSet_ marg_cpt_set(1 + hard_nodes.size());
892 marg_cpt_set.insert(&cpt);
893 for (
const auto xnode: hard_nodes) {
894 marg_cpt_set.insert(evidence[xnode]);
895 hard_variables.
insert(&(bn.variable(xnode)));
899 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
903 _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);
// exactly one resulting tensor is expected
906 if (new_cpt_list.size() != 1) {
907 for (
const auto pot: new_cpt_list)
908 if (!marg_cpt_set.contains(pot))
delete pot;
911 "the projection of a tensor containing " <<
"hard evidence is empty!");
913 auto new_pot =
const_cast< Tensor< GUM_SCALAR >*
>(*(new_cpt_list.begin()));
914 auto projected_pot =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*new_pot));
917 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
918 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
// combine each clique's tensors into its single Shafer-Shenoy tensor
928 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
929 for (
const auto& xpotset: _clique_tensors_) {
930 const auto& potset = xpotset.second;
931 if (potset.size() > 0) {
// single tensor: share it, no combination needed
936 if (potset.size() == 1) {
937 _clique_ss_tensor_[xpotset.first] = *(potset.cbegin());
939 _TensorSet_ p_potset(potset.size());
940 for (
const auto pot: potset)
942 &(
static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(pot)->multiDim()));
944 Tensor< GUM_SCALAR >* joint
945 =
const_cast< Tensor< GUM_SCALAR >*
>(fast_combination.execute(p_potset));
946 _clique_ss_tensor_[xpotset.first]
947 =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*joint));
// Schedule-based clique initialization: same dispatching as the schedule-free
// overload (CPT as-is / constant / combine-and-project with hard evidence),
// but all operations are inserted into `schedule` and executed by the
// scheduler; results feeding the cliques are made persistent so they survive
// schedule execution.
// NOTE(review): heavily elided by extraction (missing braces, elided
// declarations, GUM_ERROR opener); code left byte-identical.
955 template <
typename GUM_SCALAR >
956 void ShaferShenoyInference< GUM_SCALAR >::_initializeJTCliques_(Schedule& schedule) {
957 const auto& bn = this->BN();
958 const DAG& dag = bn.dag();
964 const NodeProperty< const Tensor< GUM_SCALAR >* >& evidence = this->evidence();
965 const NodeProperty< Idx >& hard_evidence = this->hardEvidence();
967 for (
const auto node: dag) {
968 if (_graph_.exists(node) || _hard_ev_nodes_.contains(node)) {
969 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
// classify the CPT's variables: hard-observed vs in the reduced graph
973 const auto& variables = cpt.variablesSequence();
974 bool graph_contains_nodes =
false;
975 for (
const auto var: variables) {
976 NodeId xnode = bn.nodeId(*var);
977 if (_hard_ev_nodes_.contains(xnode)) hard_nodes.insert(xnode);
978 else if (_graph_.exists(xnode)) graph_contains_nodes =
true;
// case 1: no hard evidence -> insert the CPT as-is
984 if (hard_nodes.empty()) {
985 auto sched_cpt =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(cpt,
false);
986 _clique_tensors_[_node_to_clique_[node]].insert(sched_cpt);
// case 2: all variables hard-observed -> the CPT reduces to a constant
992 if (hard_nodes.size() == variables.size()) {
993 Instantiation inst(cpt);
994 for (Size i = 0; i < hard_nodes.size(); ++i) {
995 inst.chgVal(*variables[i], hard_evidence[bn.nodeId(*(variables[i]))]);
997 _constants_.insert(node, cpt.get(inst));
1002 if (!graph_contains_nodes)
continue;
// case 3: partially observed -> schedule a combine-and-project
1006 _ScheduleMultiDimSet_ marg_cpt_set(1 + hard_nodes.size());
1007 const IScheduleMultiDim* sched_cpt
1008 = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt,
false);
1009 marg_cpt_set.insert(sched_cpt);
1011 for (
const auto xnode: hard_nodes) {
1012 const IScheduleMultiDim* pot
1013 = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode],
false);
1014 marg_cpt_set.insert(pot);
1015 hard_variables.
insert(&(bn.variable(xnode)));
1019 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1023 _ScheduleMultiDimSet_ new_cpt_list
1024 = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);
// exactly one resulting tensor is expected
1027 if (new_cpt_list.size() != 1) {
1029 "the projection of a tensor containing " <<
"hard evidence is empty!");
1031 auto projected_pot =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1032 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1033 *new_cpt_list.begin()));
// keep the projected CPT alive after the schedule is executed
1034 const_cast< ScheduleOperator*
>(schedule.scheduleMultiDimCreator(projected_pot))
1035 ->makeResultsPersistent(
true);
1036 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1037 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
1042 this->scheduler().execute(schedule);
// combine each clique's tensors into its single Shafer-Shenoy tensor
1049 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1050 for (
const auto& xpotset: _clique_tensors_) {
1051 const auto& potset = xpotset.second;
1052 if (potset.size() > 0) {
// single tensor: share it, no combination needed
1057 if (potset.size() == 1) {
1058 _clique_ss_tensor_[xpotset.first] = *(potset.cbegin());
1061 for (
const auto pot: potset) {
1062 schedule.emplaceScheduleMultiDim(*pot);
1065 auto joint =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1066 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1067 fast_combination.schedule(schedule, potset)));
// keep the combined tensor alive after the schedule is executed
1068 const_cast< ScheduleOperator*
>(schedule.scheduleMultiDimCreator(joint))
1069 ->makeResultsPersistent(
true);
1070 _clique_ss_tensor_[xpotset.first] = joint;
1074 this->scheduler().execute(schedule);
// Reacts to an outdated structure: rebuilds the join tree when needed,
// otherwise (presumably — elided else-branch, TODO confirm) falls back to
// updating only the outdated tensors.
1078 template <
typename GUM_SCALAR >
1079 void ShaferShenoyInference< GUM_SCALAR >::updateOutdatedStructure_() {
1081 if (_isNewJTNeeded_()) {
1087 updateOutdatedTensors_();
// Recursively invalidates all messages flowing away from `from_id` through
// `to_id`: marks the clique invalidated, resets the arc's message (deleting
// any tensor created for it), and recurses on the other neighbours of
// `to_id`.
// NOTE(review): the from_id/to_id parameter lines are elided by extraction;
// code left byte-identical.
1092 template <
typename GUM_SCALAR >
1093 void ShaferShenoyInference< GUM_SCALAR >::_diffuseMessageInvalidations_(
1096 NodeSet& invalidated_cliques) {
1098 invalidated_cliques.insert(to_id);
1101 const Arc arc(from_id, to_id);
1102 bool& message_computed = _messages_computed_[arc];
1103 if (message_computed) {
1104 message_computed =
false;
1105 _separator_tensors_[arc] =
nullptr;
1106 if (_arc_to_created_tensors_.exists(arc)) {
1107 delete _arc_to_created_tensors_[arc];
1108 _arc_to_created_tensors_.erase(arc);
// propagate the invalidation downstream
1112 for (
const auto node_id: _JT_->neighbours(to_id)) {
1113 if (node_id != from_id) _diffuseMessageInvalidations_(to_id, node_id, invalidated_cliques);
// Incrementally updates the structures after evidence changes, without
// rebuilding the join tree: detects the hard-evidence changes, discards the
// hard-evidence-projected CPTs they affect, diffuses message invalidations
// from every impacted clique, drops now-invalid cached posteriors, reinstalls
// soft evidence, recomputes the affected projected CPTs and per-clique
// Shafer-Shenoy tensors (with or without schedules), and refreshes the
// constants of fully hard-observed CPTs.
// NOTE(review): heavily elided by extraction (missing braces, elided
// statements, fused line numbers); code left byte-identical.
1120 template <
typename GUM_SCALAR >
1121 void ShaferShenoyInference< GUM_SCALAR >::updateOutdatedTensors_() {
// remember which clique tensors were allocated by the engine (size > 1)
1126 NodeProperty< bool > ss_tensor_to_deallocate(_clique_tensors_.size());
1127 for (
const auto& potset: _clique_tensors_) {
1128 ss_tensor_to_deallocate.insert(potset.first, (potset.second.size() > 1));
// hard-evidence nodes whose evidence changed
1138 NodeSet hard_nodes_changed(_hard_ev_nodes_.size());
1139 for (
const auto node: _hard_ev_nodes_)
1140 if (_evidence_changes_.exists(node)) hard_nodes_changed.insert(node);
// discard the projected CPTs impacted by those changes (safe iteration)
1142 NodeSet nodes_with_projected_CPTs_changed;
1143 const auto& bn = this->BN();
1144 for (
auto pot_iter = _node_to_hard_ev_projected_CPTs_.beginSafe();
1145 pot_iter != _node_to_hard_ev_projected_CPTs_.endSafe();
1147 for (
const auto var: bn.cpt(pot_iter.key()).variablesSequence()) {
1148 if (hard_nodes_changed.contains(bn.nodeId(*var))) {
1149 nodes_with_projected_CPTs_changed.insert(pot_iter.key());
1150 delete pot_iter.val();
1151 _clique_tensors_[_node_to_clique_[pot_iter.key()]].erase(pot_iter.val());
1152 _node_to_hard_ev_projected_CPTs_.erase(pot_iter);
// diffuse message invalidations from every clique touched by a change
1166 NodeSet invalidated_cliques(_JT_->size());
1167 for (
const auto& pair: _evidence_changes_) {
1168 if (_node_to_clique_.exists(pair.first)) {
1169 const auto clique = _node_to_clique_[pair.first];
1170 invalidated_cliques.insert(clique);
1171 for (
const auto neighbor: _JT_->neighbours(clique)) {
1172 _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
1179 for (
const auto node: nodes_with_projected_CPTs_changed) {
1180 const auto clique = _node_to_clique_[node];
1181 invalidated_cliques.insert(clique);
1182 for (
const auto neighbor: _JT_->neighbours(clique)) {
1183 _diffuseMessageInvalidations_(clique, neighbor, invalidated_cliques);
// free the engine-owned Shafer-Shenoy tensors of invalidated cliques
1189 for (
const auto clique: invalidated_cliques) {
1190 if (ss_tensor_to_deallocate[clique]) {
1191 delete _clique_ss_tensor_[clique];
1192 _clique_ss_tensor_[clique] =
nullptr;
// drop cached posteriors attached to invalidated cliques
1201 for (
auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
1203 if (_graph_.exists(iter.key())
1204 && (invalidated_cliques.exists(_node_to_clique_[iter.key()]))) {
1206 _target_posteriors_.erase(iter);
// drop posteriors of nodes whose hard evidence changed
1211 for (
auto iter = _target_posteriors_.beginSafe(); iter != _target_posteriors_.endSafe();
1213 if (hard_nodes_changed.contains(iter.key())) {
1215 _target_posteriors_.erase(iter);
// drop joint posteriors that are invalidated or fully hard-observed
1221 for (
auto iter = _joint_target_posteriors_.beginSafe();
1222 iter != _joint_target_posteriors_.endSafe();
1224 if (invalidated_cliques.exists(_joint_target_to_clique_[iter.key()])) {
1226 _joint_target_posteriors_.erase(iter);
1229 bool has_unevidenced_node =
false;
1230 for (
const auto node: iter.key()) {
1231 if (!hard_nodes_changed.exists(node)) {
1232 has_unevidenced_node =
true;
1236 if (!has_unevidenced_node) {
1238 _joint_target_posteriors_.erase(iter);
// reinstall all soft evidence from scratch
1245 for (
const auto& pot_pair: _node_to_soft_evidence_) {
1246 delete pot_pair.second;
1247 _clique_tensors_[_node_to_clique_[pot_pair.first]].erase(pot_pair.second);
1249 _node_to_soft_evidence_.clear();
1251 const auto& evidence = this->evidence();
1252 for (
const auto node: this->softEvidenceNodes()) {
1253 auto ev_pot =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*evidence[node],
false);
1254 _node_to_soft_evidence_.insert(node, ev_pot);
1255 _clique_tensors_[_node_to_clique_[node]].insert(ev_pot);
// schedule-based recomputation of the affected projected CPTs
1265 if (_use_schedules_) {
1267 for (
const auto node: nodes_with_projected_CPTs_changed) {
1269 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
1270 const auto& variables = cpt.variablesSequence();
1271 _ScheduleMultiDimSet_ marg_cpt_set;
1272 const auto sched_cpt = schedule.insertTable< Tensor< GUM_SCALAR > >(cpt,
false);
1273 marg_cpt_set.insert(sched_cpt);
1276 for (
const auto var: variables) {
1277 NodeId xnode = bn.nodeId(*var);
1278 if (_hard_ev_nodes_.exists(xnode)) {
1279 const auto pot = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[xnode],
false);
1280 marg_cpt_set.insert(pot);
1281 hard_variables.
insert(var);
1286 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1290 _ScheduleMultiDimSet_ new_cpt_list
1291 = combine_and_project.schedule(schedule, marg_cpt_set, hard_variables);
1294 if (new_cpt_list.size() != 1) {
1296 "the projection of a tensor containing " <<
"hard evidence is empty!");
1298 auto projected_pot =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1299 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(*new_cpt_list.begin()));
1300 const_cast< ScheduleOperator*
>(schedule.scheduleMultiDimCreator(projected_pot))
1301 ->makeResultsPersistent(
true);
1302 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1303 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
// schedule-based recombination of the invalidated cliques
1309 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1310 for (
const auto clique: invalidated_cliques) {
1311 const auto& potset = _clique_tensors_[clique];
1313 if (potset.size() > 0) {
1318 if (potset.size() == 1) {
1319 _clique_ss_tensor_[clique] = *(potset.cbegin());
1321 for (
const auto pot: potset)
1322 if (!schedule.existsScheduleMultiDim(pot->id()))
1323 schedule.emplaceScheduleMultiDim(*pot);
1324 auto joint =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1325 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
1326 fast_combination.schedule(schedule, potset)));
1327 const_cast< ScheduleOperator*
>(schedule.scheduleMultiDimCreator(joint))
1328 ->makeResultsPersistent(
true);
1329 _clique_ss_tensor_[clique] = joint;
1333 this->scheduler().execute(schedule);
// schedule-free recomputation (same logic, immediate execution)
1335 for (
const auto node: nodes_with_projected_CPTs_changed) {
1337 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node);
1338 const auto& variables = cpt.variablesSequence();
1339 _TensorSet_ marg_cpt_set(1 + variables.size());
1340 marg_cpt_set.insert(&cpt);
1343 for (
const auto var: variables) {
1344 NodeId xnode = bn.nodeId(*var);
1345 if (_hard_ev_nodes_.exists(xnode)) {
1346 marg_cpt_set.insert(evidence[xnode]);
1347 hard_variables.
insert(var);
1352 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(
1356 _TensorSet_ new_cpt_list = combine_and_project.execute(marg_cpt_set, hard_variables);
1359 if (new_cpt_list.size() != 1) {
1360 for (
const auto pot: new_cpt_list)
1361 if (!marg_cpt_set.contains(pot))
delete pot;
1364 "the projection of a tensor containing " <<
"hard evidence is empty!");
1366 Tensor< GUM_SCALAR >* xprojected_pot
1367 =
const_cast< Tensor< GUM_SCALAR >*
>(*new_cpt_list.begin());
1369 =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*xprojected_pot));
1370 delete xprojected_pot;
1371 _clique_tensors_[_node_to_clique_[node]].insert(projected_pot);
1372 _node_to_hard_ev_projected_CPTs_.insert(node, projected_pot);
// schedule-free recombination of the invalidated cliques
1378 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1379 for (
const auto clique: invalidated_cliques) {
1380 const auto& potset = _clique_tensors_[clique];
1382 if (potset.size() > 0) {
1387 if (potset.size() == 1) {
1388 _clique_ss_tensor_[clique] = *(potset.cbegin());
1390 _TensorSet_ p_potset(potset.size());
1391 for (
const auto pot: potset)
1393 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(pot)->multiDim()));
1395 Tensor< GUM_SCALAR >* joint
1396 =
const_cast< Tensor< GUM_SCALAR >*
>(fast_combination.execute(p_potset));
1397 _clique_ss_tensor_[clique]
1398 =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(std::move(*joint));
// refresh the constants of fully hard-observed CPTs
1406 const auto& hard_evidence = this->hardEvidence();
1407 for (
auto& node_cst: _constants_) {
1408 const Tensor< GUM_SCALAR >& cpt = bn.cpt(node_cst.first);
1409 const auto& variables = cpt.variablesSequence();
1411 for (
const auto var: variables)
1413 for (
const auto var: variables) {
1414 inst.chgVal(*var, hard_evidence[bn.nodeId(*var)]);
1416 node_cst.second = cpt.get(inst);
1420 _evidence_changes_.clear();
// Selects the roots of the message-passing phases: collects the cliques that
// contain targets (missing cliques are tolerated via try/catch), sorts them
// by increasing joint domain size, then greedily picks as roots the smallest
// unmarked cliques, marking each root's whole connected component so that
// exactly one root is chosen per component of the join tree.
// NOTE(review): several lines (clique_targets declaration, `try` keywords,
// closing braces) are elided by extraction; code left byte-identical.
1424 template <
typename GUM_SCALAR >
1425 void ShaferShenoyInference< GUM_SCALAR >::_computeJoinTreeRoots_() {
1430 for (
const auto node: this->targets()) {
1432 clique_targets.insert(_node_to_clique_[node]);
1433 }
catch (Exception
const&) {}
1435 for (
const auto& set: this->jointTargets()) {
1437 clique_targets.insert(_joint_target_to_clique_[set]);
1438 }
catch (Exception
const&) {}
// compute each candidate clique's joint domain size
1442 std::vector< std::pair< NodeId, Size > > possible_roots(clique_targets.size());
1443 const auto& bn = this->BN();
1445 for (
const auto clique_id: clique_targets) {
1446 const auto& clique = _JT_->clique(clique_id);
1448 for (
const auto node: clique) {
1449 dom_size *= bn.variable(node).domainSize();
1451 possible_roots[i] = std::pair< NodeId, Size >(clique_id, dom_size);
// smallest cliques are preferred as roots
1456 std::sort(possible_roots.begin(),
1457 possible_roots.end(),
1458 [](
const std::pair< NodeId, Size >& a,
const std::pair< NodeId, Size >& b) ->
bool {
1459 return a.second < b.second;
// mark the connected component of each chosen root
1463 NodeProperty< bool > marked = _JT_->nodesPropertyFromVal(
false);
1464 std::function< void(NodeId, NodeId) > diffuse_marks
1465 = [&marked, &diffuse_marks,
this](NodeId node, NodeId from) {
1466 if (!marked[node]) {
1467 marked[node] =
true;
1468 for (
const auto neigh: _JT_->neighbours(node))
1469 if ((neigh != from) && !marked[neigh]) diffuse_marks(neigh, node);
1473 for (
const auto& xclique: possible_roots) {
1474 NodeId clique = xclique.first;
1475 if (!marked[clique]) {
1476 _roots_.insert(clique);
1477 diffuse_marks(clique, clique);
1483 template <
typename GUM_SCALAR >
1484 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsGetAll_(
1485 Set< const IScheduleMultiDim* >& pot_list,
1489 template <
typename GUM_SCALAR >
1490 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation_(
1491 Set< const IScheduleMultiDim* >& pot_list,
1495 const auto& bn = this->BN();
1496 for (
const auto var: kept_vars) {
1497 kept_ids.insert(bn.nodeId(*var));
1502 BayesBall::requisiteNodes(bn.dag(),
1504 this->hardEvidenceNodes(),
1505 this->softEvidenceNodes(),
1507 for (
auto iter = pot_list.beginSafe(); iter != pot_list.endSafe(); ++iter) {
1508 const Sequence< const DiscreteVariable* >& vars = (*iter)->variablesSequence();
1510 for (
const auto var: vars) {
1511 if (requisite_nodes.exists(bn.nodeId(*var))) {
1517 if (!found) { pot_list.erase(iter); }
1522 template <
typename GUM_SCALAR >
1523 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation2_(
1524 Set< const IScheduleMultiDim* >& pot_list,
1528 const auto& bn = this->BN();
1529 for (
const auto var: kept_vars) {
1530 kept_ids.insert(bn.nodeId(*var));
1534 BayesBall::relevantTensors(bn,
1536 this->hardEvidenceNodes(),
1537 this->softEvidenceNodes(),
1542 template <
typename GUM_SCALAR >
1543 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsWithdSeparation3_(
1544 Set< const IScheduleMultiDim* >& pot_list,
1548 const auto& bn = this->BN();
1549 for (
const auto var: kept_vars) {
1550 kept_ids.insert(bn.nodeId(*var));
1554 dSeparationAlgorithm dsep;
1555 dsep.relevantTensors(bn,
1557 this->hardEvidenceNodes(),
1558 this->softEvidenceNodes(),
1563 template <
typename GUM_SCALAR >
1564 void ShaferShenoyInference< GUM_SCALAR >::_findRelevantTensorsXX_(
1565 Set< const IScheduleMultiDim* >& pot_list,
1567 switch (_find_relevant_tensor_type_) {
1568 case RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS :
1569 _findRelevantTensorsWithdSeparation2_(pot_list, kept_vars);
1572 case RelevantTensorsFinderType::DSEP_BAYESBALL_NODES :
1573 _findRelevantTensorsWithdSeparation_(pot_list, kept_vars);
1576 case RelevantTensorsFinderType::DSEP_KOLLER_FRIEDMAN_2009 :
1577 _findRelevantTensorsWithdSeparation3_(pot_list, kept_vars);
1580 case RelevantTensorsFinderType::FIND_ALL :
1581 _findRelevantTensorsGetAll_(pot_list, kept_vars);
1589 template <
typename GUM_SCALAR >
1590 Set< const IScheduleMultiDim* >
1591 ShaferShenoyInference< GUM_SCALAR >::_removeBarrenVariables_(Schedule& schedule,
1592 _ScheduleMultiDimSet_& pot_list,
1597 for (
auto iter = the_del_vars.
beginSafe(); iter != the_del_vars.
endSafe(); ++iter) {
1598 NodeId
id = this->BN().nodeId(**iter);
1599 if (this->hardEvidenceNodes().exists(
id) || this->softEvidenceNodes().exists(
id)) {
1600 the_del_vars.
erase(iter);
1605 HashTable< const DiscreteVariable*, _ScheduleMultiDimSet_ > var2pots(the_del_vars.
size());
1606 _ScheduleMultiDimSet_ empty_pot_set;
1607 for (
const auto pot: pot_list) {
1608 const auto& vars = pot->variablesSequence();
1609 for (
const auto var: vars) {
1610 if (the_del_vars.
exists(var)) {
1611 if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1612 var2pots[var].insert(pot);
1619 HashTable< const IScheduleMultiDim*, gum::VariableSet > pot2barren_var;
1621 for (
const auto& elt: var2pots) {
1622 if (elt.second.size() == 1) {
1623 const IScheduleMultiDim* pot = *(elt.second.begin());
1624 if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
1625 pot2barren_var[pot].insert(elt.first);
1632 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1633 _ScheduleMultiDimSet_ projected_pots;
1634 for (
const auto& elt: pot2barren_var) {
1636 const IScheduleMultiDim* pot = elt.first;
1637 pot_list.erase(pot);
1641 if (pot->variablesSequence().size() != elt.second.size()) {
1642 const IScheduleMultiDim* new_pot = projector.schedule(schedule, pot, elt.second);
1646 pot_list.insert(new_pot);
1647 projected_pots.insert(new_pot);
1651 return projected_pots;
1655 template <
typename GUM_SCALAR >
1656 Set< const Tensor< GUM_SCALAR >* >
1657 ShaferShenoyInference< GUM_SCALAR >::_removeBarrenVariables_(_TensorSet_& pot_list,
1662 for (
auto iter = the_del_vars.
beginSafe(); iter != the_del_vars.
endSafe(); ++iter) {
1663 NodeId
id = this->BN().nodeId(**iter);
1664 if (this->hardEvidenceNodes().exists(
id) || this->softEvidenceNodes().exists(
id)) {
1665 the_del_vars.
erase(iter);
1670 HashTable< const DiscreteVariable*, _TensorSet_ > var2pots(the_del_vars.
size());
1671 _TensorSet_ empty_pot_set;
1672 for (
const auto pot: pot_list) {
1673 const Sequence< const DiscreteVariable* >& vars = pot->variablesSequence();
1674 for (
const auto var: vars) {
1675 if (the_del_vars.
exists(var)) {
1676 if (!var2pots.exists(var)) { var2pots.insert(var, empty_pot_set); }
1677 var2pots[var].insert(pot);
1686 for (
const auto& elt: var2pots) {
1687 if (elt.second.size() == 1) {
1688 const Tensor< GUM_SCALAR >* pot = *(elt.second.begin());
1689 if (!pot2barren_var.exists(pot)) { pot2barren_var.insert(pot, empty_var_set); }
1690 pot2barren_var[pot].insert(elt.first);
1697 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1698 _TensorSet_ projected_pots;
1699 for (
const auto& elt: pot2barren_var) {
1701 const Tensor< GUM_SCALAR >* pot = elt.first;
1702 pot_list.erase(pot);
1706 if (pot->variablesSequence().size() != elt.second.size()) {
1707 const Tensor< GUM_SCALAR >* new_pot = projector.execute(*pot, elt.second);
1708 pot_list.insert(new_pot);
1709 projected_pots.insert(new_pot);
1713 return projected_pots;
1717 template <
typename GUM_SCALAR >
1718 INLINE
void ShaferShenoyInference< GUM_SCALAR >::_collectMessage_(Schedule& schedule,
1721 for (
const auto other: _JT_->neighbours(
id)) {
1722 if ((other != from) && !_messages_computed_[Arc(other,
id)])
1723 _collectMessage_(schedule, other,
id);
1726 if ((
id != from) && !_messages_computed_[Arc(
id, from)]) {
1727 _produceMessage_(schedule,
id, from);
1732 template <
typename GUM_SCALAR >
1733 INLINE
void ShaferShenoyInference< GUM_SCALAR >::_collectMessage_(NodeId
id, NodeId from) {
1734 for (
const auto other: _JT_->neighbours(
id)) {
1735 if ((other != from) && !_messages_computed_[Arc(other,
id)]) _collectMessage_(other,
id);
1738 if ((
id != from) && !_messages_computed_[Arc(
id, from)]) { _produceMessage_(
id, from); }
1742 template <
typename GUM_SCALAR >
1743 const IScheduleMultiDim* ShaferShenoyInference< GUM_SCALAR >::_marginalizeOut_(
1745 Set< const IScheduleMultiDim* > pot_list,
1752 if (pot_list.empty()) {
1753 return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
1758 for (
const auto pot: pot_list) {
1759 if (!schedule.existsScheduleMultiDim(pot->id())) schedule.emplaceScheduleMultiDim(*pot);
1764 _ScheduleMultiDimSet_ barren_projected_tensors;
1765 if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
1766 barren_projected_tensors = _removeBarrenVariables_(schedule, pot_list, del_vars);
1770 _ScheduleMultiDimSet_ new_pot_list;
1771 if (pot_list.size() == 1) {
1772 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1773 auto xpot = projector.schedule(schedule, *(pot_list.begin()), del_vars);
1774 new_pot_list.insert(xpot);
1775 }
else if (pot_list.size() > 1) {
1778 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
1780 new_pot_list = combine_and_project.schedule(schedule, pot_list, del_vars);
1786 for (
auto barren_pot: barren_projected_tensors) {
1787 if (!new_pot_list.exists(barren_pot))
1788 schedule.emplaceDeletion(
1789 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>& >(*barren_pot));
1793 if (new_pot_list.empty())
1794 return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
1795 if (new_pot_list.size() == 1)
return *(new_pot_list.begin());
1796 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1797 return fast_combination.schedule(schedule, new_pot_list);
1801 template <
typename GUM_SCALAR >
1802 const IScheduleMultiDim* ShaferShenoyInference< GUM_SCALAR >::_marginalizeOut_(
1803 Set< const IScheduleMultiDim* >& pot_list,
1807 if (pot_list.empty()) {
1808 return new ScheduleMultiDim< Tensor< GUM_SCALAR > >(Tensor< GUM_SCALAR >());
1811 _TensorSet_ xpot_list(pot_list.size());
1812 for (
auto pot: pot_list)
1814 &(
static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(pot)->multiDim()));
1821 _TensorSet_ barren_projected_tensors;
1822 if (_barren_nodes_type_ == FindBarrenNodesType::FIND_BARREN_NODES) {
1823 barren_projected_tensors = _removeBarrenVariables_(xpot_list, del_vars);
1827 _TensorSet_ xnew_pot_list;
1828 if (xpot_list.size() == 1) {
1829 MultiDimProjection< Tensor< GUM_SCALAR > > projector(_projection_op_);
1830 auto xpot = projector.execute(**(xpot_list.begin()), del_vars);
1831 xnew_pot_list.insert(xpot);
1832 }
else if (xpot_list.size() > 1) {
1835 MultiDimCombineAndProjectDefault< Tensor< GUM_SCALAR > > combine_and_project(_combination_op_,
1837 xnew_pot_list = combine_and_project.execute(xpot_list, del_vars);
1841 const Tensor< GUM_SCALAR >* xres_pot;
1842 ScheduleMultiDim< Tensor< GUM_SCALAR > >* res_pot;
1843 if (xnew_pot_list.size() == 1) {
1844 xres_pot = *(xnew_pot_list.begin());
1845 }
else if (xnew_pot_list.size() > 1) {
1848 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
1849 xres_pot = fast_combination.execute(xnew_pot_list);
1850 for (
const auto pot: xnew_pot_list) {
1851 if (!xpot_list.contains(pot) && (pot != xres_pot))
delete pot;
1854 xres_pot =
new Tensor< GUM_SCALAR >();
1858 if (xpot_list.contains(xres_pot))
1859 res_pot =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(*xres_pot,
false);
1861 res_pot =
new ScheduleMultiDim< Tensor< GUM_SCALAR > >(
1862 std::move(
const_cast< Tensor< GUM_SCALAR >&
>(*xres_pot)));
1869 for (
const auto barren_pot: barren_projected_tensors) {
1870 if (!xnew_pot_list.exists(barren_pot))
delete barren_pot;
1877 template <
typename GUM_SCALAR >
1878 void ShaferShenoyInference< GUM_SCALAR >::_produceMessage_(Schedule& schedule,
1882 _ScheduleMultiDimSet_ pot_list;
1883 if (_clique_ss_tensor_[from_id] !=
nullptr) pot_list.insert(_clique_ss_tensor_[from_id]);
1886 for (
const auto other_id: _JT_->neighbours(from_id)) {
1887 if (other_id != to_id) {
1888 const auto separator_pot = _separator_tensors_[Arc(other_id, from_id)];
1889 if (separator_pot !=
nullptr) pot_list.insert(separator_pot);
1894 const NodeSet& from_clique = _JT_->clique(from_id);
1895 const NodeSet& separator = _JT_->separator(from_id, to_id);
1898 const auto& bn = this->BN();
1900 for (
const auto node: from_clique) {
1901 if (!separator.contains(node)) {
1902 del_vars.
insert(&(bn.variable(node)));
1904 kept_vars.
insert(&(bn.variable(node)));
1910 const IScheduleMultiDim* new_pot = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
1913 const Arc arc(from_id, to_id);
1914 if (!pot_list.exists(new_pot)) {
1915 if (!_arc_to_created_tensors_.exists(arc)) {
1916 _arc_to_created_tensors_.insert(arc, new_pot);
1919 auto op = schedule.scheduleMultiDimCreator(new_pot);
1920 if (op !=
nullptr)
const_cast< ScheduleOperator*
>(op)->makeResultsPersistent(
true);
1924 _separator_tensors_[arc] = new_pot;
1925 _messages_computed_[arc] =
true;
1929 template <
typename GUM_SCALAR >
1930 void ShaferShenoyInference< GUM_SCALAR >::_produceMessage_(NodeId from_id, NodeId to_id) {
1932 _ScheduleMultiDimSet_ pot_list;
1933 if (_clique_ss_tensor_[from_id] !=
nullptr) pot_list.insert(_clique_ss_tensor_[from_id]);
1936 for (
const auto other_id: _JT_->neighbours(from_id)) {
1937 if (other_id != to_id) {
1938 const auto separator_pot = _separator_tensors_[Arc(other_id, from_id)];
1939 if (separator_pot !=
nullptr) pot_list.insert(separator_pot);
1944 const NodeSet& from_clique = _JT_->clique(from_id);
1945 const NodeSet& separator = _JT_->separator(from_id, to_id);
1948 const auto& bn = this->BN();
1950 for (
const auto node: from_clique) {
1951 if (!separator.contains(node)) {
1952 del_vars.
insert(&(bn.variable(node)));
1954 kept_vars.
insert(&(bn.variable(node)));
1960 const IScheduleMultiDim* new_pot = _marginalizeOut_(pot_list, del_vars, kept_vars);
1963 const Arc arc(from_id, to_id);
1964 if (!pot_list.exists(new_pot)) {
1965 if (!_arc_to_created_tensors_.exists(arc)) { _arc_to_created_tensors_.insert(arc, new_pot); }
1968 _separator_tensors_[arc] = new_pot;
1969 _messages_computed_[arc] =
true;
1973 template <
typename GUM_SCALAR >
1974 INLINE
void ShaferShenoyInference< GUM_SCALAR >::makeInference_() {
1975 if (_use_schedules_) {
1979 for (
const auto node: this->targets()) {
1983 if (_graph_.exists(node)) {
1984 _collectMessage_(schedule, _node_to_clique_[node], _node_to_clique_[node]);
1992 for (
const auto& set: _joint_target_to_clique_)
1993 _collectMessage_(schedule, set.second, set.second);
1996 this->scheduler().execute(schedule);
1999 for (
const auto node: this->targets()) {
2003 if (_graph_.exists(node)) {
2004 _collectMessage_(_node_to_clique_[node], _node_to_clique_[node]);
2012 for (
const auto& set: _joint_target_to_clique_)
2013 _collectMessage_(set.second, set.second);
2018 template <
typename GUM_SCALAR >
2019 Tensor< GUM_SCALAR >*
2020 ShaferShenoyInference< GUM_SCALAR >::unnormalizedJointPosterior_(NodeId
id) {
2021 if (_use_schedules_) {
2023 return _unnormalizedJointPosterior_(schedule,
id);
2025 return _unnormalizedJointPosterior_(
id);
2030 template <
typename GUM_SCALAR >
2031 Tensor< GUM_SCALAR >*
2032 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule,
2034 const auto& bn = this->BN();
2038 if (this->hardEvidenceNodes().contains(
id)) {
2039 return new Tensor< GUM_SCALAR >(*(this->evidence()[
id]));
2042 auto& scheduler = this->scheduler();
2046 const NodeId clique_of_id = _node_to_clique_[id];
2047 _collectMessage_(schedule, clique_of_id, clique_of_id);
2052 _ScheduleMultiDimSet_ pot_list;
2053 if (_clique_ss_tensor_[clique_of_id] !=
nullptr)
2054 pot_list.insert(_clique_ss_tensor_[clique_of_id]);
2057 for (
const auto other: _JT_->neighbours(clique_of_id))
2058 pot_list.insert(_separator_tensors_[Arc(other, clique_of_id)]);
2061 const NodeSet& nodes = _JT_->clique(clique_of_id);
2064 for (
const auto node: nodes) {
2065 if (node !=
id) del_vars.
insert(&(bn.variable(node)));
2070 auto resulting_pot =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2071 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2072 _marginalizeOut_(schedule, pot_list, del_vars, kept_vars)));
2073 Tensor< GUM_SCALAR >* joint =
nullptr;
2075 scheduler.execute(schedule);
2079 if (pot_list.exists(resulting_pot)) {
2080 joint =
new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2082 joint = resulting_pot->exportMultiDim();
2088 bool nonzero_found =
false;
2089 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2090 if (joint->get(inst)) {
2091 nonzero_found =
true;
2095 if (!nonzero_found) {
2099 "some evidence entered into the Bayes "
2100 "net are incompatible (their joint proba = 0)");
2106 template <
typename GUM_SCALAR >
2107 Tensor< GUM_SCALAR >*
2108 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(NodeId
id) {
2109 const auto& bn = this->BN();
2113 if (this->hardEvidenceNodes().contains(
id)) {
2114 return new Tensor< GUM_SCALAR >(*(this->evidence()[
id]));
2119 NodeId clique_of_id = _node_to_clique_[id];
2120 _collectMessage_(clique_of_id, clique_of_id);
2125 _ScheduleMultiDimSet_ pot_list;
2126 if (_clique_ss_tensor_[clique_of_id] !=
nullptr)
2127 pot_list.insert(_clique_ss_tensor_[clique_of_id]);
2130 for (
const auto other: _JT_->neighbours(clique_of_id))
2131 pot_list.insert(_separator_tensors_[Arc(other, clique_of_id)]);
2134 const NodeSet& nodes = _JT_->clique(clique_of_id);
2137 for (
const auto node: nodes) {
2138 if (node !=
id) del_vars.
insert(&(bn.variable(node)));
2143 auto resulting_pot =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2144 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2145 _marginalizeOut_(pot_list, del_vars, kept_vars)));
2146 Tensor< GUM_SCALAR >* joint =
nullptr;
2150 if (pot_list.exists(resulting_pot)) {
2151 joint =
new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2153 joint = resulting_pot->exportMultiDim();
2154 delete resulting_pot;
2160 bool nonzero_found =
false;
2161 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2162 if (joint->get(inst)) {
2163 nonzero_found =
true;
2167 if (!nonzero_found) {
2171 "some evidence entered into the Bayes "
2172 "net are incompatible (their joint proba = 0)");
2178 template <
typename GUM_SCALAR >
2179 const Tensor< GUM_SCALAR >& ShaferShenoyInference< GUM_SCALAR >::posterior_(NodeId
id) {
2181 if (_target_posteriors_.exists(
id)) {
return *(_target_posteriors_[id]); }
2184 auto joint = unnormalizedJointPosterior_(
id);
2185 if (joint->sum() != 1)
2187 _target_posteriors_.insert(
id, joint);
2193 template <
typename GUM_SCALAR >
2194 Tensor< GUM_SCALAR >*
2195 ShaferShenoyInference< GUM_SCALAR >::unnormalizedJointPosterior_(
const NodeSet& set) {
2196 if (_use_schedules_) {
2198 return _unnormalizedJointPosterior_(schedule, set);
2200 return _unnormalizedJointPosterior_(set);
2205 template <
typename GUM_SCALAR >
2206 Tensor< GUM_SCALAR >*
2207 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(Schedule& schedule,
2208 const NodeSet& set) {
2211 NodeSet targets = set, hard_ev_nodes;
2212 for (
const auto node: this->hardEvidenceNodes()) {
2213 if (targets.contains(node)) {
2214 targets.erase(node);
2215 hard_ev_nodes.insert(node);
2219 auto& scheduler = this->scheduler();
2223 const auto& evidence = this->evidence();
2224 if (targets.empty()) {
2225 if (set.size() == 1) {
2226 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2228 _ScheduleMultiDimSet_ pot_list;
2229 for (
const auto node: set) {
2230 auto new_pot_ev = schedule.insertTable< Tensor< GUM_SCALAR > >(*evidence[node],
false);
2231 pot_list.insert(new_pot_ev);
2235 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2236 const IScheduleMultiDim* pot = fast_combination.schedule(schedule, pot_list);
2237 auto schedule_pot =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2238 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(pot));
2239 scheduler.execute(schedule);
2240 auto result = schedule_pot->exportMultiDim();
2250 NodeId clique_of_set;
2252 clique_of_set = _joint_target_to_clique_[set];
2259 for (
const auto node: targets) {
2260 if (!_graph_.exists(node)) {
2262 "The variable " << this->BN().variable(node).name() <<
"(" << node
2263 <<
") does not belong to this optimized inference.")
2269 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2271 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2272 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2273 elim_order.insert(JT_elim_order[i], (
int)i);
2274 NodeId first_eliminated_node = *(targets.begin());
2275 int elim_number = elim_order[first_eliminated_node];
2276 for (
const auto node: targets) {
2277 if (elim_order[node] < elim_number) {
2278 elim_number = elim_order[node];
2279 first_eliminated_node = node;
2283 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2287 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2288 for (
const auto node: targets) {
2289 if (!clique_nodes.contains(node)) {
2291 this->BN().names(set) <<
"(" << set <<
")"
2292 <<
" is not addressable in this optimized inference.")
2297 _joint_target_to_clique_.
insert(set, clique_of_set);
2301 _collectMessage_(schedule, clique_of_set, clique_of_set);
2306 _ScheduleMultiDimSet_ pot_list;
2307 if (_clique_ss_tensor_[clique_of_set] !=
nullptr) {
2308 auto pot = _clique_ss_tensor_[clique_of_set];
2309 if (!schedule.existsScheduleMultiDim(pot->id())) schedule.emplaceScheduleMultiDim(*pot);
2310 pot_list.insert(_clique_ss_tensor_[clique_of_set]);
2314 for (
const auto other: _JT_->neighbours(clique_of_set)) {
2315 const auto pot = _separator_tensors_[Arc(other, clique_of_set)];
2316 if (pot !=
nullptr) pot_list.insert(pot);
2321 const NodeSet& nodes = _JT_->clique(clique_of_set);
2324 const auto& bn = this->BN();
2325 for (
const auto node: nodes) {
2326 if (!targets.contains(node)) {
2327 del_vars.
insert(&(bn.variable(node)));
2329 kept_vars.
insert(&(bn.variable(node)));
2335 const IScheduleMultiDim* new_pot = _marginalizeOut_(schedule, pot_list, del_vars, kept_vars);
2336 scheduler.execute(schedule);
2337 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot
2338 =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2339 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(new_pot));
2343 Tensor< GUM_SCALAR >* joint =
nullptr;
2344 if (pot_list.exists(resulting_pot)) {
2345 joint =
new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2347 joint = resulting_pot->exportMultiDim();
2352 bool nonzero_found =
false;
2353 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2354 if ((*joint)[inst]) {
2355 nonzero_found =
true;
2359 if (!nonzero_found) {
2363 "some evidence entered into the Bayes "
2364 "net are incompatible (their joint proba = 0)");
2370 if (!hard_ev_nodes.empty()) {
2371 _TensorSet_ pot_list;
2372 pot_list.insert(joint);
2373 const auto& hard_evidence = this->evidence();
2374 for (
const auto node: hard_ev_nodes)
2375 pot_list.insert(hard_evidence[node]);
2376 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > combine(_combination_op_);
2377 Tensor< GUM_SCALAR >* new_joint = combine.execute(pot_list);
2386 template <
typename GUM_SCALAR >
2387 Tensor< GUM_SCALAR >*
2388 ShaferShenoyInference< GUM_SCALAR >::_unnormalizedJointPosterior_(
const NodeSet& set) {
2391 NodeSet targets = set, hard_ev_nodes;
2392 for (
const auto node: this->hardEvidenceNodes()) {
2393 if (targets.contains(node)) {
2394 targets.erase(node);
2395 hard_ev_nodes.insert(node);
2401 const auto& evidence = this->evidence();
2402 if (targets.empty()) {
2403 if (set.size() == 1) {
2404 return new Tensor< GUM_SCALAR >(*evidence[*set.begin()]);
2406 _TensorSet_ pot_list;
2407 for (
const auto node: set) {
2408 pot_list.insert(evidence[node]);
2412 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > fast_combination(_combination_op_);
2413 const Tensor< GUM_SCALAR >* pot = fast_combination.execute(pot_list);
2415 return const_cast< Tensor< GUM_SCALAR >*
>(pot);
2423 NodeId clique_of_set;
2425 clique_of_set = _joint_target_to_clique_[set];
2432 for (
const auto node: targets) {
2433 if (!_graph_.exists(node)) {
2435 node <<
" cannot be a query in the optimized inference (w.r.t the declared "
2436 "targets/evidence)")
2442 const std::vector< NodeId >& JT_elim_order = _triangulation_->eliminationOrder();
2444 NodeProperty< int > elim_order(Size(JT_elim_order.size()));
2445 for (std::size_t i = std::size_t(0), size = JT_elim_order.size(); i < size; ++i)
2446 elim_order.insert(JT_elim_order[i], (
int)i);
2447 NodeId first_eliminated_node = *(targets.begin());
2448 int elim_number = elim_order[first_eliminated_node];
2449 for (
const auto node: targets) {
2450 if (elim_order[node] < elim_number) {
2451 elim_number = elim_order[node];
2452 first_eliminated_node = node;
2456 clique_of_set = _triangulation_->createdJunctionTreeClique(first_eliminated_node);
2459 const NodeSet& clique_nodes = _JT_->clique(clique_of_set);
2460 for (
const auto node: targets) {
2461 if (!clique_nodes.contains(node)) {
2463 this->BN().names(set) <<
"(" << set <<
")"
2464 <<
" is not addressable in this optimized inference.")
2469 _joint_target_to_clique_.
insert(set, clique_of_set);
2473 _collectMessage_(clique_of_set, clique_of_set);
2478 _ScheduleMultiDimSet_ pot_list;
2479 if (_clique_ss_tensor_[clique_of_set] !=
nullptr) {
2480 auto pot = _clique_ss_tensor_[clique_of_set];
2481 if (pot !=
nullptr) pot_list.insert(_clique_ss_tensor_[clique_of_set]);
2485 for (
const auto other: _JT_->neighbours(clique_of_set)) {
2486 const auto pot = _separator_tensors_[Arc(other, clique_of_set)];
2487 if (pot !=
nullptr) pot_list.insert(pot);
2491 const NodeSet& nodes = _JT_->clique(clique_of_set);
2494 const auto& bn = this->BN();
2495 for (
const auto node: nodes) {
2496 if (!targets.contains(node)) {
2497 del_vars.
insert(&(bn.variable(node)));
2499 kept_vars.
insert(&(bn.variable(node)));
2505 const IScheduleMultiDim* new_pot = _marginalizeOut_(pot_list, del_vars, kept_vars);
2506 ScheduleMultiDim< Tensor< GUM_SCALAR > >* resulting_pot
2507 =
const_cast< ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(
2508 static_cast< const ScheduleMultiDim< Tensor< GUM_SCALAR >
>* >(new_pot));
2512 Tensor< GUM_SCALAR >* joint =
nullptr;
2513 if (pot_list.exists(resulting_pot)) {
2514 joint =
new Tensor< GUM_SCALAR >(resulting_pot->multiDim());
2516 joint = resulting_pot->exportMultiDim();
2522 bool nonzero_found =
false;
2523 for (Instantiation inst(*joint); !inst.end(); ++inst) {
2524 if ((*joint)[inst]) {
2525 nonzero_found =
true;
2529 if (!nonzero_found) {
2533 "some evidence entered into the Bayes "
2534 "net are incompatible (their joint proba = 0)");
2540 if (!hard_ev_nodes.empty()) {
2541 _TensorSet_ pot_list;
2542 pot_list.insert(joint);
2543 const auto& hard_evidence = this->evidence();
2544 for (
const auto node: hard_ev_nodes)
2545 pot_list.insert(hard_evidence[node]);
2546 MultiDimCombinationDefault< Tensor< GUM_SCALAR > > combine(_combination_op_);
2547 Tensor< GUM_SCALAR >* new_joint = combine.execute(pot_list);
2556 template <
typename GUM_SCALAR >
2557 const Tensor< GUM_SCALAR >&
2558 ShaferShenoyInference< GUM_SCALAR >::jointPosterior_(
const NodeSet& set) {
2560 if (_joint_target_posteriors_.exists(set)) {
return *(_joint_target_posteriors_[set]); }
2563 auto joint = unnormalizedJointPosterior_(set);
2565 _joint_target_posteriors_.insert(set, joint);
2571 template <
typename GUM_SCALAR >
2572 const Tensor< GUM_SCALAR >&
2573 ShaferShenoyInference< GUM_SCALAR >::jointPosterior_(
const NodeSet& wanted_target,
2574 const NodeSet& declared_target) {
2576 if (_joint_target_posteriors_.exists(wanted_target))
2577 return *(_joint_target_posteriors_[wanted_target]);
2583 if (!_joint_target_posteriors_.exists(declared_target)) { jointPosterior_(declared_target); }
2586 const auto& bn = this->BN();
2588 for (
const auto node: declared_target)
2589 if (!wanted_target.contains(node)) del_vars.
insert(&(bn.variable(node)));
2591 =
new Tensor< GUM_SCALAR >(_joint_target_posteriors_[declared_target]->sumOut(del_vars));
2594 _joint_target_posteriors_.insert(wanted_target, pot);
2599 template <
typename GUM_SCALAR >
2600 GUM_SCALAR ShaferShenoyInference< GUM_SCALAR >::evidenceProbability() {
2603 RelevantTensorsFinderType old_relevant_type = _find_relevant_tensor_type_;
2608 if (old_relevant_type != RelevantTensorsFinderType::FIND_ALL) {
2609 _find_relevant_tensor_type_ = RelevantTensorsFinderType::FIND_ALL;
2610 _is_new_jt_needed_ =
true;
2611 this->setOutdatedStructureState_();
2615 this->makeInference();
2623 GUM_SCALAR prob_ev = 1;
2624 for (
const auto root: _roots_) {
2626 NodeId node = *(_JT_->clique(root).begin());
2627 Tensor< GUM_SCALAR >* tmp = unnormalizedJointPosterior_(node);
2628 prob_ev *= tmp->sum();
2632 for (
const auto& projected_cpt: _constants_)
2633 prob_ev *= projected_cpt.second;
2636 _find_relevant_tensor_type_ = old_relevant_type;
The BayesBall algorithm (as described by Schachter).
Detect barren nodes for inference in Bayesian networks.
An algorithm for converting a join tree into a binary join tree.
Exception : a similar element already exists.
<agrum/BN/inference/evidenceInference.h>
Exception : fatal (unknown ?) error.
Class representing the minimal interface for Bayesian network with no numerical data.
Exception : several evidence are incompatible together (proba=0).
Exception: at least one argument passed to a function is not what was expected.
<agrum/BN/inference/jointTargetedInference.h>
Exception : the element we looked for cannot be found.
Size size() const noexcept
Returns the number of elements in the set.
iterator_safe beginSafe() const
The usual safe begin iterator to parse the set.
const iterator_safe & endSafe() const noexcept
The usual safe end iterator to parse the set.
bool exists(const Key &k) const
Indicates whether a given elements belong to the set.
void insert(const Key &k)
Inserts a new element into the set.
void erase(const Key &k)
Erases an element from the set.
ShaferShenoyInference(const IBayesNet< GUM_SCALAR > *BN, RelevantTensorsFinderType=RelevantTensorsFinderType::DSEP_BAYESBALL_TENSORS, FindBarrenNodesType barren_type=FindBarrenNodesType::FIND_BARREN_NODES, bool use_binary_join_tree=true)
default constructor
Exception : a looked-for element could not be found.
d-separation analysis (as described in Koller & Friedman 2009)
#define GUM_ERROR(type, msg)
Set< NodeId > NodeSet
Some typdefs and define for shortcuts ...
Header files of gum::Instantiation.
gum is the global namespace for all aGrUM entities
FindBarrenNodesType
type of algorithm to determine barren nodes
Set< const DiscreteVariable * > VariableSet
CliqueGraph JoinTree
a join tree is a clique graph satisfying the running intersection property (but some cliques may be i...
CliqueGraph JunctionTree
a junction tree is a clique graph satisfying the running intersection property and such that no cliqu...
RelevantTensorsFinderType
type of algorithm for determining the relevant tensors for combinations using some d-separation analy...