aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
inferenceEngine_tpl.h
Go to the documentation of this file.
1/****************************************************************************
2 * This file is part of the aGrUM/pyAgrum library. *
3 * *
4 * Copyright (c) 2005-2025 by *
5 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
6 * - Christophe GONZALES(_at_AMU) *
7 * *
8 * The aGrUM/pyAgrum library is free software; you can redistribute it *
9 * and/or modify it under the terms of either : *
10 * *
11 * - the GNU Lesser General Public License as published by *
12 * the Free Software Foundation, either version 3 of the License, *
13 * or (at your option) any later version, *
14 * - the MIT license (MIT), *
15 * - or both in dual license, as here. *
16 * *
17 * (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) *
18 * *
19 * This aGrUM/pyAgrum library is distributed in the hope that it will be *
20 * useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
21 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS *
22 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR *
26 * OTHER DEALINGS IN THE SOFTWARE. *
27 * *
28 * See LICENCES for more details. *
29 * *
30 * SPDX-FileCopyrightText: Copyright 2005-2025 *
31 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
32 * - Christophe GONZALES(_at_AMU) *
33 * SPDX-License-Identifier: LGPL-3.0-or-later OR MIT *
34 * *
35 * Contact : info_at_agrum_dot_org *
36 * homepage : http://agrum.gitlab.io *
37 * gitlab : https://gitlab.com/agrumery/agrum *
38 * *
39 ****************************************************************************/
40#pragma once
41
42
47
48#include <algorithm>
49
50#include <agrum/agrum.h>
51
53
54namespace gum {
55 namespace credal {
56
57 /*template< typename GUM_SCALAR >
58 InferenceEngine< GUM_SCALAR >::InferenceEngine () : ApproximationScheme() {
59 std::cout << "InferenceEngine construct ()" << std::endl;
60 GUM_CONSTRUCTOR ( InferenceEngine )
61 }*/
62
63 template < typename GUM_SCALAR >
74
75 template < typename GUM_SCALAR >
79
80 template < typename GUM_SCALAR >
84
85 template < typename GUM_SCALAR >
// NOTE(review): the signature line (listing line 86) is missing from this
// extraction; from the body this clears evidence, queries and dynamic
// expectations — presumably eraseAllEvidence(). TODO confirm against the repo.
87 evidence_.clear();
88 query_.clear();
89 /*
90 marginalMin_.clear();
91 marginalMax_.clear();
92 oldMarginalMin_.clear();
93 oldMarginalMax_.clear();
94 */
96 /*
97 expectationMin_.clear();
98 expectationMax_.clear();
99 */
101
102 // marginalSets_.clear();
104
105 dynamicExpMin_.clear();
106 dynamicExpMax_.clear();
107
108 //_modal.clear();
109
110 //_t0.clear();
111 //_t1.clear();
112 }
113
114 /*
115 template< typename GUM_SCALAR >
116 void InferenceEngine< GUM_SCALAR >::setIterStop ( const int &iter_stop ) {
117 iterStop_ = iter_stop;
118 }*/
119
120 template < typename GUM_SCALAR >
// Setter (signature line 121 missing from this extraction): enables/disables
// storing the optimal IBayesNet — presumably storeBNOpt(const bool value).
122 storeBNOpt_ = value;
123 }
124
125 template < typename GUM_SCALAR >
// Setter (signature line 126 missing): toggles vertex storage; when enabled,
// (re)initializes the per-node credal sets so inference can fill them.
127 storeVertices_ = value;
128
129 if (value) initMarginalSets_();
130 }
131
132 template < typename GUM_SCALAR >
// Setter (signature line 133 missing): toggles repetitive independence.
// Clusters are computed only on a false -> true transition, never twice.
134 bool oldValue = repetitiveInd_;
135 repetitiveInd_ = repetitive;
136
137 // do not compute clusters more than once
138 if (repetitiveInd_ && !oldValue) repetitiveInit_();
139 }
140
141 template < typename GUM_SCALAR >
145
146 /*
147 template< typename GUM_SCALAR >
148 int InferenceEngine< GUM_SCALAR >::iterStop () const {
149 return iterStop_;
150 }*/
151
152 template < typename GUM_SCALAR >
156
157 template < typename GUM_SCALAR >
161
162 template < typename GUM_SCALAR >
163 VarMod2BNsMap< GUM_SCALAR >* InferenceEngine< GUM_SCALAR >::getVarMod2BNsMap() {
164 return &dbnOpt_;
165 }
166
167 template < typename GUM_SCALAR >
// Loads modality values from a text file (one "name v0 v1 ..." entry per
// line). NOTE(review): the signature (line 168) and the GUM_ERROR opening
// (line 172) are missing from this extraction — presumably
// insertModalsFile(const std::string& path). TODO confirm.
169 std::ifstream mod_stream(path.c_str(), std::ios::in);
170
171 if (!mod_stream.good()) {
173 "void InferenceEngine< GUM_SCALAR "
174 ">::insertModals(const std::string & path) : "
175 "could not open input file : "
176 << path);
177 }
178
179 if (!modal_.empty()) modal_.clear();
180
181 std::string line, tmp;
182 char * cstr, *p;
183
184 while (mod_stream.good()) {
185 getline(mod_stream, line);
186
187 if (line.size() == 0) continue;
188
189 cstr = new char[line.size() + 1];
190 strcpy(cstr, line.c_str());
191
192 p = strtok(cstr, " ");
193 tmp = p;
194
195 std::vector< GUM_SCALAR > values;
196 p = strtok(nullptr, " ");
197
198 while (p != nullptr) {
199 values.push_back(GUM_SCALAR(atof(p)));
200 p = strtok(nullptr, " ");
201 } // end of : line
203 modal_.insert(tmp, values); //[tmp] = values;
204
// NOTE(review): p is nullptr here (loop exit condition), so `delete[] p` is a
// harmless no-op; only cstr actually owns memory.
205 delete[] p;
206 delete[] cstr;
207 } // end of : file
209 mod_stream.close();
210
212 }
214 template < typename GUM_SCALAR >
// Inserts modalities from an in-memory map (name -> values). Entries whose
// name is unknown or whose size mismatches the variable's domain are silently
// skipped. NOTE(review): the signature line (215) is missing from this
// extraction — presumably insertModals(const std::map<...>& modals).
216 const std::map< std::string, std::vector< GUM_SCALAR > >& modals) {
217 if (!modal_.empty()) modal_.clear();
218
219 for (auto it = modals.cbegin(), theEnd = modals.cend(); it != theEnd; ++it) {
220 NodeId id;
221
222 try {
223 id = credalNet_->current_bn().idFromName(it->first);
224 } catch (NotFound& err) {
225 GUM_SHOWERROR(err);
226 continue;
227 }
228
229 // check that modals are net compatible
230 auto dSize = credalNet_->current_bn().variable(id).domainSize();
231
232 if (dSize != it->second.size()) continue;
233
234 // GUM_ERROR(OperationNotAllowed, "void InferenceEngine< GUM_SCALAR
235 // >::insertModals( const std::map< std::string, std::vector< GUM_SCALAR
236 // > >
237 // &modals) : modalities does not respect variable cardinality : " <<
238 // credalNet_->current_bn().variable( id ).name() << " : " << dSize << "
239 // != "
240 // << it->second.size());
241
242 modal_.insert(it->first, it->second); //[ it->first ] = it->second;
243 }
244
245 //_modal = modals;
246
// expectation bounds depend on modal_, so they must be (re)initialized here
247 initExpectations_();
248 }
249
250 template < typename GUM_SCALAR >
// Inserts evidence from a (name -> distribution) map; unknown names are
// reported and skipped. NOTE(review): the signature line (251) is missing —
// presumably insertEvidence(const std::map<...>& eviMap).
252 const std::map< std::string, std::vector< GUM_SCALAR > >& eviMap) {
253 if (!evidence_.empty()) evidence_.clear();
254
255 for (auto it = eviMap.cbegin(), theEnd = eviMap.cend(); it != theEnd; ++it) {
256 NodeId id;
257
258 try {
259 id = credalNet_->current_bn().idFromName(it->first);
260 } catch (NotFound& err) {
261 GUM_SHOWERROR(err);
262 continue;
263 }
264
265 evidence_.insert(id, it->second);
266 }
267
268 // forces the computation of the begin iterator to avoid subsequent data races
269 // @TODO make HashTableConstIterator constructors thread safe
270 evidence_.begin();
271 }
272
273 // check that observed variables DO exists in the network (otherwise Lazy
274 // report
275 // an error and app crash)
276 template < typename GUM_SCALAR >
// Inserts evidence from a NodeProperty; each node id is validated against the
// current BN first (unknown ids reported and skipped). NOTE(review): the
// signature line (277) is missing from this extraction.
278 const NodeProperty< std::vector< GUM_SCALAR > >& evidence) {
279 if (!evidence_.empty()) evidence_.clear();
280
281 // use cbegin() to get const_iterator when available in aGrUM hashtables
282 for (const auto& elt: evidence) {
283 try {
284 credalNet_->current_bn().variable(elt.first);
285 } catch (NotFound& err) {
286 GUM_SHOWERROR(err);
287 continue;
288 }
289
290 evidence_.insert(elt.first, elt.second);
291 }
292
293 // forces the computation of the begin iterator to avoid subsequent data races
294 // @TODO make HashTableConstIterator constructors thread safe
295 evidence_.begin();
296 }
297
298 template < typename GUM_SCALAR >
// Parses an evidence file: skips to the "[EVIDENCE]" section, then reads one
// "name v0 v1 ..." entry per line until "[QUERY]" or EOF. NOTE(review): the
// signature (line 299) and GUM_ERROR opening (line 303) are missing from this
// extraction — presumably insertEvidenceFile(const std::string& path).
300 std::ifstream evi_stream(path.c_str(), std::ios::in);
302 if (!evi_stream.good()) {
304 "void InferenceEngine< GUM_SCALAR "
305 ">::insertEvidence(const std::string & path) : could not "
306 "open input file : "
307 << path);
308 }
309
310 if (!evidence_.empty()) evidence_.clear();
311
312 std::string line, tmp;
313 char * cstr, *p;
314
315 while (evi_stream.good() && std::strcmp(line.c_str(), "[EVIDENCE]") != 0) {
316 getline(evi_stream, line);
317 }
318
319 while (evi_stream.good()) {
320 getline(evi_stream, line);
321
322 if (std::strcmp(line.c_str(), "[QUERY]") == 0) break;
323
324 if (line.size() == 0) continue;
326 cstr = new char[line.size() + 1];
327 strcpy(cstr, line.c_str());
328
329 p = strtok(cstr, " ");
330 tmp = p;
332 // if user input is wrong
333 NodeId node = -1;
334
335 try {
336 node = credalNet_->current_bn().idFromName(tmp);
// NOTE(review): `continue` here skips the `delete[] cstr` below — cstr leaks
// for every line naming an unknown variable.
337 } catch (NotFound& err) {
338 GUM_SHOWERROR(err);
339 continue;
340 }
341
342 std::vector< GUM_SCALAR > values;
343 p = strtok(nullptr, " ");
344
345 while (p != nullptr) {
346 values.push_back(GUM_SCALAR(atof(p)));
347 p = strtok(nullptr, " ");
348 } // end of : line
349
350 evidence_.insert(node, values);
351
// p is nullptr here, so this delete[] is a no-op; cstr is the real owner
352 delete[] p;
353 delete[] cstr;
354 } // end of : file
355
356 evi_stream.close();
357
358 // forces the computation of the begin iterator to avoid subsequent data races
359 // @TODO make HashTableConstIterator constructors thread safe
360 evidence_.begin();
361 }
363 template < typename GUM_SCALAR >
// Inserts queries from a NodeProperty of per-modality flags; ids are validated
// against the current BN. NOTE(review): the signature line (364) and the
// function's closing brace (line 378) are missing from this extraction.
365 const NodeProperty< std::vector< bool > >& query) {
366 if (!query_.empty()) query_.clear();
367
368 for (const auto& elt: query) {
369 try {
370 credalNet_->current_bn().variable(elt.first);
371 } catch (NotFound& err) {
372 GUM_SHOWERROR(err);
373 continue;
374 }
375
376 query_.insert(elt.first, elt.second);
377 }
379
380 template < typename GUM_SCALAR >
// Parses a query file: skips to "[QUERY]", then reads "name [mod ...]" lines
// until "[EVIDENCE]" or EOF. A bare name queries all modalities.
// NOTE(review): the signature (line 381) and two GUM_ERROR openings (lines
// 385, 434) are missing from this extraction.
382 std::ifstream evi_stream(path.c_str(), std::ios::in);
383
384 if (!evi_stream.good()) {
386 "void InferenceEngine< GUM_SCALAR >::insertQuery(const "
387 "std::string & path) : could not open input file : "
388 << path);
389 }
390
391 if (!query_.empty()) query_.clear();
392
393 std::string line, tmp;
394 char * cstr, *p;
395
396 while (evi_stream.good() && std::strcmp(line.c_str(), "[QUERY]") != 0) {
397 getline(evi_stream, line);
398 }
399
400 while (evi_stream.good()) {
401 getline(evi_stream, line);
402
403 if (std::strcmp(line.c_str(), "[EVIDENCE]") == 0) break;
404
405 if (line.size() == 0) continue;
407 cstr = new char[line.size() + 1];
408 strcpy(cstr, line.c_str());
409
410 p = strtok(cstr, " ");
411 tmp = p;
412
413 // if user input is wrong
414 NodeId node = -1;
415
416 try {
417 node = credalNet_->current_bn().idFromName(tmp);
// NOTE(review): `continue` skips `delete[] cstr` below — cstr leaks for every
// line naming an unknown variable.
418 } catch (NotFound& err) {
419 GUM_SHOWERROR(err);
420 continue;
421 }
422
423 auto dSize = credalNet_->current_bn().variable(node).domainSize();
424
425 p = strtok(nullptr, " ");
426
427 if (p == nullptr) {
// no modality given: query the whole domain of the variable
428 query_.insert(node, std::vector< bool >(dSize, true));
429 } else {
430 std::vector< bool > values(dSize, false);
431
432 while (p != nullptr) {
433 if ((Size)atoi(p) >= dSize)
435 "void InferenceEngine< GUM_SCALAR "
436 ">::insertQuery(const std::string & path) : "
437 "query modality is higher or equal to "
438 "cardinality");
439
440 values[atoi(p)] = true;
441 p = strtok(nullptr, " ");
442 } // end of : line
443
444 query_.insert(node, values);
445 }
446
// p is nullptr here (loop/if exit), so this delete[] is a no-op
447 delete[] p;
448 delete[] cstr;
449 } // end of : file
450
451 evi_stream.close();
452 }
453
454 template < typename GUM_SCALAR >
455 INLINE Tensor< GUM_SCALAR >
456 InferenceEngine< GUM_SCALAR >::marginalMin(const std::string& varName) const {
457 return marginalMin(credalNet_->current_bn().idFromName(varName));
458 }
459
460 template < typename GUM_SCALAR >
461 INLINE Tensor< GUM_SCALAR >
462 InferenceEngine< GUM_SCALAR >::marginalMax(const std::string& varName) const {
463 return marginalMax(credalNet_->current_bn().idFromName(varName));
464 }
465
466 template < typename GUM_SCALAR >
// Builds a Tensor over variable id filled with the stored lower marginal
// bounds. NOTE(review): the signature line (467) is missing — presumably
// marginalMin(const NodeId id) const; the catch rethrows a copy of err.
468 try {
469 Tensor< GUM_SCALAR > res;
470 res.add(credalNet_->current_bn().variable(id));
471 res.fillWith(marginalMin_[id]);
472 return res;
473 } catch (NotFound& err) { throw(err); }
474 }
475
476 template < typename GUM_SCALAR >
// Builds a Tensor over variable id filled with the stored upper marginal
// bounds. NOTE(review): the signature line (477) is missing — presumably
// marginalMax(const NodeId id) const; the catch rethrows a copy of err.
478 try {
479 Tensor< GUM_SCALAR > res;
480 res.add(credalNet_->current_bn().variable(id));
481 res.fillWith(marginalMax_[id]);
482 return res;
483 } catch (NotFound& err) { throw(err); }
484 }
486 template < typename GUM_SCALAR >
487 const GUM_SCALAR&
488 InferenceEngine< GUM_SCALAR >::expectationMin(const std::string& varName) const {
// Lower expectation by variable name; NotFound from idFromName or from the
// hashtable lookup is rethrown (as a copy). NOTE(review): the closing brace
// line (492) is missing from this extraction.
489 try {
490 return expectationMin_[credalNet_->current_bn().idFromName(varName)];
491 } catch (NotFound& err) { throw(err); }
493
494 template < typename GUM_SCALAR >
495 const GUM_SCALAR&
496 InferenceEngine< GUM_SCALAR >::expectationMax(const std::string& varName) const {
497 try {
498 return expectationMax_[credalNet_->current_bn().idFromName(varName)];
499 } catch (NotFound& err) { throw(err); }
500 }
501
502 template < typename GUM_SCALAR >
503 const GUM_SCALAR& InferenceEngine< GUM_SCALAR >::expectationMin(const NodeId id) const {
504 try {
505 return expectationMin_[id];
506 } catch (NotFound& err) { throw(err); }
507 }
508
509 template < typename GUM_SCALAR >
510 const GUM_SCALAR& InferenceEngine< GUM_SCALAR >::expectationMax(const NodeId id) const {
511 try {
512 return expectationMax_[id];
513 } catch (NotFound& err) { throw(err); }
514 }
515
516 template < typename GUM_SCALAR >
517 const std::vector< GUM_SCALAR >&
518 InferenceEngine< GUM_SCALAR >::dynamicExpMin(const std::string& varName) const {
519 std::string errTxt = "const std::vector< GUM_SCALAR > & InferenceEngine< "
520 "GUM_SCALAR >::dynamicExpMin ( const std::string & "
521 "varName ) const : ";
522
523 if (dynamicExpMin_.empty())
524 GUM_ERROR(OperationNotAllowed, errTxt + "_dynamicExpectations() needs to be called before")
525
526 if (!dynamicExpMin_.exists(varName) /*dynamicExpMin_.find(varName) == dynamicExpMin_.end()*/)
527 GUM_ERROR(NotFound, errTxt + "variable name not found : " << varName)
528
529 return dynamicExpMin_[varName];
530 }
531
532 template < typename GUM_SCALAR >
533 const std::vector< GUM_SCALAR >&
534 InferenceEngine< GUM_SCALAR >::dynamicExpMax(const std::string& varName) const {
535 std::string errTxt = "const std::vector< GUM_SCALAR > & InferenceEngine< "
536 "GUM_SCALAR >::dynamicExpMax ( const std::string & "
537 "varName ) const : ";
538
539 if (dynamicExpMax_.empty())
540 GUM_ERROR(OperationNotAllowed, errTxt + "_dynamicExpectations() needs to be called before")
541
542 if (!dynamicExpMax_.exists(varName) /*dynamicExpMin_.find(varName) == dynamicExpMin_.end()*/)
543 GUM_ERROR(NotFound, errTxt + "variable name not found : " << varName)
544
545 return dynamicExpMax_[varName];
546 }
547
548 template < typename GUM_SCALAR >
549 const std::vector< std::vector< GUM_SCALAR > >&
// Accessor for the stored credal-set vertices of a node. NOTE(review): the
// signature line (550) is missing — presumably vertices(const NodeId id) const.
551 return marginalSets_[id];
552 }
554 template < typename GUM_SCALAR >
// Writes "name modality min max" lines for every node/modality to path.
// NOTE(review): the GUM_ERROR opening (line 559) and the inner-loop closing
// brace (line 572) are missing from this extraction.
555 void InferenceEngine< GUM_SCALAR >::saveMarginals(const std::string& path) const {
556 std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
557
558 if (!m_stream.good()) {
560 "void InferenceEngine< GUM_SCALAR >::saveMarginals(const "
561 "std::string & path) const : could not open output file "
562 ": " << path);
563 }
564
565 for (const auto& elt: marginalMin_) {
566 Size esize = Size(elt.second.size());
567
568 for (Size mod = 0; mod < esize; mod++) {
569 m_stream << credalNet_->current_bn().variable(elt.first).name() << " " << mod << " "
570 << (elt.second)[mod] << " " << marginalMax_[elt.first][mod] << std::endl;
571 }
573
574 m_stream.close();
575 }
576
577 template < typename GUM_SCALAR >
// Writes dynamic expectations (first all minima, then all maxima) to path as
// "name v0 v1 ..." lines; no-op if nothing was computed. NOTE(review): the
// GUM_ERROR opening (line 589) is missing from this extraction.
578 void InferenceEngine< GUM_SCALAR >::saveExpectations(const std::string& path) const {
579 if (dynamicExpMin_.empty()) //_modal.empty())
580 return;
581
582 // else not here, to keep the const (natural with a saving process)
583 // else if(dynamicExpMin_.empty() || dynamicExpMax_.empty())
584 //_dynamicExpectations(); // works with or without a dynamic network
586 std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
587
588 if (!m_stream.good()) {
590 "void InferenceEngine< GUM_SCALAR "
591 ">::saveExpectations(const std::string & path) : could "
592 "not open output file : "
593 << path);
594 }
595
596 for (const auto& elt: dynamicExpMin_) {
597 m_stream << elt.first; // it->first;
598
599 // iterates over a vector
600 for (const auto& elt2: elt.second) {
601 m_stream << " " << elt2;
602 }
603
604 m_stream << std::endl;
605 }
606
607 for (const auto& elt: dynamicExpMax_) {
608 m_stream << elt.first;
609
610 // iterates over a vector
611 for (const auto& elt2: elt.second) {
612 m_stream << " " << elt2;
613 }
614
615 m_stream << std::endl;
616 }
617
618 m_stream.close();
619 }
620
621 template < typename GUM_SCALAR >
// Pretty-prints all marginal intervals as "P(X=mod|e) = [ min, max ]" lines,
// tagging queried modalities with " QUERY". NOTE(review): the signature line
// (622) is missing — presumably std::string toString() const.
623 std::stringstream output;
624 output << std::endl;
625
626 // use cbegin() when available
627 for (const auto& elt: marginalMin_) {
628 Size esize = Size(elt.second.size());
629
630 for (Size mod = 0; mod < esize; mod++) {
631 output << "P(" << credalNet_->current_bn().variable(elt.first).name() << "=" << mod
632 << "|e) = [ ";
633 output << marginalMin_[elt.first][mod] << ", " << marginalMax_[elt.first][mod] << " ]";
634
635 if (!query_.empty())
636 if (query_.exists(elt.first) && query_[elt.first][mod]) output << " QUERY";
637
638 output << std::endl;
639 }
640
641 output << std::endl;
642 }
643
644 return output.str();
645 }
646
647 template < typename GUM_SCALAR >
// Writes each node's credal-set vertices to path, one "[v0,v1,...]" per line.
// NOTE(review): the GUM_ERROR opening (line 652) is missing from this
// extraction; also "outpul" in the message below is a typo in the original.
648 void InferenceEngine< GUM_SCALAR >::saveVertices(const std::string& path) const {
649 std::ofstream m_stream(path.c_str(), std::ios::out | std::ios::trunc);
650
651 if (!m_stream.good()) {
653 "void InferenceEngine< GUM_SCALAR >::saveVertices(const "
654 "std::string & path) : could not open outpul file : "
655 << path);
656 }
657
658 for (const auto& elt: marginalSets_) {
659 m_stream << credalNet_->current_bn().variable(elt.first).name() << std::endl;
660
661 for (const auto& elt2: elt.second) {
662 m_stream << "[";
663 bool first = true;
664
// NOTE(review): `first` is only set to false inside the `if (!first)` branch,
// so it stays true forever and the "," separator is never emitted — the
// intended `first = false;` should run after printing each element.
665 for (const auto& elt3: elt2) {
666 if (!first) {
667 m_stream << ",";
668 first = false;
669 }
670
671 m_stream << elt3;
672 }
673
674 m_stream << "]\n";
675 }
676 }
677
678 m_stream.close();
679 }
680
681 template < typename GUM_SCALAR >
// Resets marginal bounds: minima start at 1 and maxima at 0 so that the first
// inference update always tightens them. NOTE(review): the signature (line
// 682) and the thread-dispatch call (line 699) are missing from this
// extraction — presumably initMarginals_().
683 marginalMin_.clear();
684 marginalMax_.clear();
685 oldMarginalMin_.clear();
686 oldMarginalMax_.clear();
687
688 for (auto node: credalNet_->current_bn().nodes()) {
689 auto dSize = credalNet_->current_bn().variable(node).domainSize();
690 marginalMin_.insert(node, std::vector< GUM_SCALAR >(dSize, 1));
691 oldMarginalMin_.insert(node, std::vector< GUM_SCALAR >(dSize, 1));
692
693 marginalMax_.insert(node, std::vector< GUM_SCALAR >(dSize, 0));
694 oldMarginalMax_.insert(node, std::vector< GUM_SCALAR >(dSize, 0));
695 }
696
697 // now that we know the sizes of marginalMin_ and marginalMax_, we can
698 // dispatch their processes to the threads
700 }
701
702 template < typename GUM_SCALAR >
// Resets per-node vertex storage; empty sets are created only when vertex
// storage is enabled. NOTE(review): the signature line (703) is missing —
// presumably initMarginalSets_().
704 marginalSets_.clear();
705
706 if (!storeVertices_) return;
707
708 for (auto node: credalNet_->current_bn().nodes())
709 marginalSets_.insert(node, std::vector< std::vector< GUM_SCALAR > >());
710 }
711
712 // since only monitored variables in modal_ will be alble to compute
713 // expectations, it is useless to initialize those for all variables
714 // modal_ variables will always be checked further, so it is not necessary
715 // to
716 // check it here, but doing so will use less memory
718 template < typename GUM_SCALAR >
// Initializes expectation bounds for monitored variables only: min starts at
// the largest modality value and max at the smallest so the first update
// tightens them. NOTE(review): the signature line is missing — presumably
// initExpectations_(); assumes modal_ vectors are sorted ascending (back() is
// the max, front() the min) — TODO confirm.
719 expectationMin_.clear();
720 expectationMax_.clear();
721
722 if (modal_.empty()) return;
723
724 for (auto node: credalNet_->current_bn().nodes()) {
725 std::string var_name, time_step;
726
727 var_name = credalNet_->current_bn().variable(node).name();
728 auto delim = var_name.find_first_of("_");
729 var_name = var_name.substr(0, delim);
730
731 if (!modal_.exists(var_name)) continue;
732
733 expectationMin_.insert(node, modal_[var_name].back());
734 expectationMax_.insert(node, modal_[var_name].front());
735 }
736 }
737
738 template < typename GUM_SCALAR >
742
743 template < typename GUM_SCALAR >
// Aggregates per-node expectations into per-variable time series, keyed by the
// "name_t" naming convention. NOTE(review): the signature line (744) is
// missing — presumably dynamicExpectations_().
745 // no modals, no expectations computed during inference
746 if (expectationMin_.empty() || modal_.empty()) return;
747
748 // already called by the algorithm or the user
749 if (dynamicExpMax_.size() > 0 && dynamicExpMin_.size() > 0) return;
750
751 using innerMap = typename gum::HashTable< int, GUM_SCALAR >;
752
753 using outerMap = typename gum::HashTable< std::string, innerMap >;
754
755
756 // if non dynamic, directly save expectationMin_ et Max (same but faster)
757 outerMap expectationsMin, expectationsMax;
758
759 for (const auto& elt: expectationMin_) {
760 std::string var_name, time_step;
761
762 var_name = credalNet_->current_bn().variable(elt.first).name();
763 auto delim = var_name.find_first_of("_");
764 time_step = var_name.substr(delim + 1, var_name.size());
765 var_name = var_name.substr(0, delim);
766
767 // to be sure (don't store not monitored variables' expectations)
768 // although it
769 // should be taken care of before this point
770 if (!modal_.exists(var_name)) continue;
771
// getWithDefault inserts the default if absent and returns a reference,
// so chaining it and assigning writes into the nested table in place
772 expectationsMin.getWithDefault(var_name, innerMap())
773 .getWithDefault(atoi(time_step.c_str()), 0)
774 = elt.second; // we iterate with min iterators
775 expectationsMax.getWithDefault(var_name, innerMap())
776 .getWithDefault(atoi(time_step.c_str()), 0)
777 = expectationMax_[elt.first];
778 }
779
780 for (const auto& elt: expectationsMin) {
781 typename std::vector< GUM_SCALAR > dynExp(elt.second.size());
782
783 for (const auto& elt2: elt.second)
784 dynExp[elt2.first] = elt2.second;
785
786 dynamicExpMin_.insert(elt.first, dynExp);
787 }
788
789 for (const auto& elt: expectationsMax) {
790 typename std::vector< GUM_SCALAR > dynExp(elt.second.size());
791
792 for (const auto& elt2: elt.second) {
793 dynExp[elt2.first] = elt2.second;
794 }
795
796 dynamicExpMax_.insert(elt.first, dynExp);
797 }
798 }
799
800 template < typename GUM_SCALAR >
// Builds the t0_/t1_ clusters of a dynamic network: t=0 and t=1 variables
// become cluster keys; later time slices attach to the cluster whose CPT has
// the same domain size. NOTE(review): the signature (line 801) and a
// GUM_ERROR opening (line 812) are missing from this extraction —
// presumably repetitiveInit_().
802 timeSteps_ = 0;
803 t0_.clear();
804 t1_.clear();
805
806 // t = 0 vars belongs to t0_ as keys
807 for (auto node: credalNet_->current_bn().dag().nodes()) {
808 std::string var_name = credalNet_->current_bn().variable(node).name();
809 auto delim = var_name.find_first_of("_");
810
// find_first_of returns npos (> size()) when no '_' exists: not dynamic
811 if (delim > var_name.size()) {
813 "void InferenceEngine< GUM_SCALAR "
814 ">::repetitiveInit_() : the network does not "
815 "appear to be dynamic");
816 }
817
818 std::string time_step = var_name.substr(delim + 1, 1);
819
820 if (time_step.compare("0") == 0) t0_.insert(node, std::vector< NodeId >());
821 }
822
823 // t = 1 vars belongs to either t0_ as member value or t1_ as keys
824 for (const auto& node: credalNet_->current_bn().dag().nodes()) {
825 std::string var_name = credalNet_->current_bn().variable(node).name();
826 auto delim = var_name.find_first_of("_");
827 std::string time_step = var_name.substr(delim + 1, var_name.size());
828 var_name = var_name.substr(0, delim);
829 delim = time_step.find_first_of("_");
830 time_step = time_step.substr(0, delim);
831
832 if (time_step.compare("1") == 0) {
833 bool found = false;
834
835 for (const auto& elt: t0_) {
836 std::string var_0_name = credalNet_->current_bn().variable(elt.first).name();
837 delim = var_0_name.find_first_of("_");
838 var_0_name = var_0_name.substr(0, delim);
839
840 if (var_name.compare(var_0_name) == 0) {
841 const Tensor< GUM_SCALAR >* tensor(&credalNet_->current_bn().cpt(node));
842 const Tensor< GUM_SCALAR >* tensor2(&credalNet_->current_bn().cpt(elt.first));
843
844 if (tensor->domainSize() == tensor2->domainSize()) t0_[elt.first].push_back(node);
845 else t1_.insert(node, std::vector< NodeId >());
846
847 found = true;
848 break;
849 }
850 }
851
852 if (!found) { t1_.insert(node, std::vector< NodeId >()); }
853 }
854 }
855
856 // t > 1 vars belongs to either t0_ or t1_ as member value
857 // remember timeSteps_
858 for (auto node: credalNet_->current_bn().dag().nodes()) {
859 std::string var_name = credalNet_->current_bn().variable(node).name();
860 auto delim = var_name.find_first_of("_");
861 std::string time_step = var_name.substr(delim + 1, var_name.size());
862 var_name = var_name.substr(0, delim);
863 delim = time_step.find_first_of("_");
864 time_step = time_step.substr(0, delim);
865
866 if (time_step.compare("0") != 0 && time_step.compare("1") != 0) {
867 // keep max time_step
868 if (atoi(time_step.c_str()) > timeSteps_) timeSteps_ = atoi(time_step.c_str());
869
// NOTE(review): this outer var_0_name is shadowed by the loop-local
// declarations below and is never actually used.
870 std::string var_0_name;
871 bool found = false;
872
873 for (const auto& elt: t0_) {
874 std::string var_0_name = credalNet_->current_bn().variable(elt.first).name();
875 delim = var_0_name.find_first_of("_");
876 var_0_name = var_0_name.substr(0, delim);
877
878 if (var_name.compare(var_0_name) == 0) {
879 const Tensor< GUM_SCALAR >* tensor(&credalNet_->current_bn().cpt(node));
880 const Tensor< GUM_SCALAR >* tensor2(&credalNet_->current_bn().cpt(elt.first));
881
882 if (tensor->domainSize() == tensor2->domainSize()) {
883 t0_[elt.first].push_back(node);
884 found = true;
885 break;
886 }
887 }
888 }
889
890 if (!found) {
891 for (const auto& elt: t1_) {
892 std::string var_0_name = credalNet_->current_bn().variable(elt.first).name();
893 auto delim = var_0_name.find_first_of("_");
894 var_0_name = var_0_name.substr(0, delim);
895
896 if (var_name.compare(var_0_name) == 0) {
897 const Tensor< GUM_SCALAR >* tensor(&credalNet_->current_bn().cpt(node));
898 const Tensor< GUM_SCALAR >* tensor2(&credalNet_->current_bn().cpt(elt.first));
899
900 if (tensor->domainSize() == tensor2->domainSize()) {
901 t1_[elt.first].push_back(node);
902 break;
903 }
904 }
905 }
906 }
907 }
908 }
909 }
910
911 template < typename GUM_SCALAR >
// Updates the expectation bounds of node id from a new credal-set vertex:
// the dot product of the vertex with the variable's modality values widens
// [expectationMin_, expectationMax_] as needed. NOTE(review): the first line
// of the signature (line 912) is missing — presumably updateExpectations_(.
913 const NodeId& id,
914 const std::vector< GUM_SCALAR >& vertex) {
915 std::string var_name = credalNet_->current_bn().variable(id).name();
916 auto delim = var_name.find_first_of("_");
917
918 var_name = var_name.substr(0, delim);
919
920 if (modal_.exists(var_name) /*modal_.find(var_name) != modal_.end()*/) {
921 GUM_SCALAR exp = 0;
922 auto vsize = vertex.size();
923
924 for (Size mod = 0; mod < vsize; mod++)
925 exp += vertex[mod] * modal_[var_name][mod];
926
927 if (exp > expectationMax_[id]) expectationMax_[id] = exp;
928
929 if (exp < expectationMin_[id]) expectationMin_[id] = exp;
930 }
931 }
932
933 template < typename GUM_SCALAR >
// Adds a vertex to node id's credal set unless an (almost) equal one exists,
// then prunes redundant vertices. NOTE(review): the signature first line
// (934) is missing — presumably updateCredalSets_(const NodeId& id,.
935 const std::vector< GUM_SCALAR >& vertex,
936 const bool& elimRedund) {
937 auto& nodeCredalSet = marginalSets_[id];
938 auto dsize = vertex.size();
939
940 bool eq = true;
941
942 for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend(); it != itEnd; ++it) {
943 eq = true;
944
945 for (Size i = 0; i < dsize; i++) {
946 if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
947 eq = false;
948 break;
949 }
950 }
951
952 if (eq) break;
953 }
954
// NOTE(review): both branches of this if/else return, so everything below
// (the remove_if pruning and the lrs-based redundancy elimination) is
// unreachable dead code — verify against the upstream source whether the
// `} else return;` was meant to be just `}`.
955 if (!eq || nodeCredalSet.size() == 0) {
956 nodeCredalSet.push_back(vertex);
957 return;
958 } else return;
959
960 // because of next lambda return condition
961 if (nodeCredalSet.size() == 1) return;
962
963 // check that the point and all previously added ones are not inside the
964 // actual
965 // polytope
966 auto itEnd = std::remove_if(
967 nodeCredalSet.begin(),
968 nodeCredalSet.end(),
969 [&](const std::vector< GUM_SCALAR >& v) -> bool {
970 for (auto jt = v.cbegin(),
971 jtEnd = v.cend(),
972 minIt = marginalMin_[id].cbegin(),
973 minItEnd = marginalMin_[id].cend(),
974 maxIt = marginalMax_[id].cbegin(),
975 maxItEnd = marginalMax_[id].cend();
976 jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
977 ++jt, ++minIt, ++maxIt) {
978 if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
979 && std::fabs(*minIt - *maxIt) > 1e-6)
980 return false;
981 }
982 return true;
983 });
984
985 nodeCredalSet.erase(itEnd, nodeCredalSet.end());
986
987 // we need at least 2 points to make a convex combination
988 if (!elimRedund || nodeCredalSet.size() <= 2) return;
989
990 // there may be points not inside the polytope but on one of it's facet,
991 // meaning it's still a convex combination of vertices of this facet. Here
992 // we
993 // need lrs.
994 LRSWrapper< GUM_SCALAR > lrsWrapper;
995 lrsWrapper.setUpV((unsigned int)dsize, (unsigned int)(nodeCredalSet.size()));
996
997 for (const auto& vtx: nodeCredalSet)
998 lrsWrapper.fillV(vtx);
999
1000 lrsWrapper.elimRedundVrep();
1001
1002 marginalSets_[id] = lrsWrapper.getOutput();
1003 }
1004
1005 template < typename GUM_SCALAR >
1010
1011 template < typename GUM_SCALAR >
1016
1017 template < typename GUM_SCALAR >
// Computes the convergence criterion: the largest absolute change of any
// marginal bound since the previous iteration, computed in parallel over the
// ranges prepared by dispatchMarginalsToThreads_(). NOTE(review): the
// signature (line 1018) and the ThreadExecutor::execute call opening (line
// 1073) are missing from this extraction.
1019 // compute the number of threads and prepare for the result
1020 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
1021 ? this->threadRanges_.size() - 1
1022 : 1; // no nested multithreading
// NOTE(review): initializing the per-thread accumulators to max() looks
// wrong for a max-of-deltas reduction (it would make eps always max()) —
// this may be an extraction artifact; verify against the upstream source.
1023 std::vector< GUM_SCALAR > tEps(nb_threads, std::numeric_limits< GUM_SCALAR >::max());
1024
1025 // create the function to be executed by the threads
1026 auto threadedEps = [this, &tEps](const std::size_t this_thread,
1027 const std::size_t nb_threads,
1028 const std::vector< std::pair< NodeId, Idx > >& ranges) {
1029 auto& this_tEps = tEps[this_thread];
1030 GUM_SCALAR delta;
1031
1032 // below, we will loop over indices i and j of marginalMin_ and
1033 // marginalMax_. Index i represents nodes and j allow to parse their
1034 // domain. To parse all the domains of all the nodes, we should theorically
1035 // use 2 loops. However, here, we will use one loop: we start with node i
1036 // and parse its domain with index j. When this is done, we move to the
1037 // next node, and so on. The underlying idea is that, by doing so, we
1038 // need not parse in this function the whole domain of a node: we can start
1039 // the loop at a given value of node i and complete the loop on another
1040 // value of another node. These values are computed in Vector threadRanges_
1041 // by Method dispatchMarginalsToThreads_(), which dispatches the loops
1042 // among threads
1043 auto i = ranges[this_thread].first;
1044 auto j = ranges[this_thread].second;
1045 auto domain_size = this->marginalMax_[i].size();
1046 const auto end_i = ranges[this_thread + 1].first;
1047 auto end_j = ranges[this_thread + 1].second;
1048 const auto marginalMax_size = this->marginalMax_.size();
1049
1050 while ((i < end_i) || (j < end_j)) {
1051 // on min
1052 delta = marginalMin_[i][j] - oldMarginalMin_[i][j];
1053 delta = (delta < 0) ? (-delta) : delta;
1054 this_tEps = (this_tEps < delta) ? delta : this_tEps;
1055
1056 // on max
1057 delta = marginalMax_[i][j] - oldMarginalMax_[i][j];
1058 delta = (delta < 0) ? (-delta) : delta;
1059 this_tEps = (this_tEps < delta) ? delta : this_tEps;
1060
1061 oldMarginalMin_[i][j] = marginalMin_[i][j];
1062 oldMarginalMax_[i][j] = marginalMax_[i][j];
1063
1064 if (++j == domain_size) {
1065 j = 0;
1066 ++i;
1067 if (i < marginalMax_size) domain_size = this->marginalMax_[i].size();
1068 }
1069 }
1070 };
1071
1072 // launch the threads
1074 nb_threads,
1075 threadedEps,
1076 (nb_threads == 1)
1077 ? std::vector< std::pair< NodeId, Idx > >{{0, 0}, {this->marginalMin_.size(), 0}}
1078 : this->threadRanges_);
1079
1080 // aggregate all the results
1081 GUM_SCALAR eps = tEps[0];
1082 for (const auto nb: tEps)
1083 if (eps < nb) eps = nb;
1084
1085 return eps;
1086 }
1087
1088 /*
1089 // old openMP code:
1090 GUM_SCALAR eps = 0;
1091 #pragma omp parallel
1092 {
1093 GUM_SCALAR tEps = 0;
1094 GUM_SCALAR delta;
1095
1097 int nsize = int(marginalMin_.size());
1098
1099 #pragma omp for
1100
1101 for (int i = 0; i < nsize; i++) {
1102 auto dSize = marginalMin_[i].size();
1103
1104 for (Size j = 0; j < dSize; j++) {
1105 // on min
1106 delta = marginalMin_[i][j] - oldMarginalMin_[i][j];
1107 delta = (delta < 0) ? (-delta) : delta;
1108 tEps = (tEps < delta) ? delta : tEps;
1109
1110 // on max
1111 delta = marginalMax_[i][j] - oldMarginalMax_[i][j];
1112 delta = (delta < 0) ? (-delta) : delta;
1113 tEps = (tEps < delta) ? delta : tEps;
1114
1115 oldMarginalMin_[i][j] = marginalMin_[i][j];
1116 oldMarginalMax_[i][j] = marginalMax_[i][j];
1117 }
1118 } // end of : all variables
1119
1120 #pragma omp critical(epsilon_max)
1121 {
1122 #pragma omp flush(eps)
1123 eps = (eps < tEps) ? tEps : eps;
1124 }
1125 }
1126
1127 return eps;
1128 }
1129 */
1130
1131
1132 template < typename GUM_SCALAR >
// Partitions the flattened (node, modality) index space of the marginals into
// nearly equal contiguous ranges, one per thread, stored in threadRanges_.
// NOTE(review): the signature line (1133) is missing from this extraction —
// presumably dispatchMarginalsToThreads_().
1134 // we compute the number of elements in the 2 loops (over i,j in marginalMin_[i][j])
1135 Size nb_elements = 0;
1136 const auto marginalMin_size = this->marginalMin_.size();
1137 for (const auto& marg_i: this->marginalMin_)
1138 nb_elements += marg_i.second.size();
1139
1140 // distribute evenly the elements among the threads
1141 auto nb_threads = ThreadNumberManager::getNumberOfThreads();
1142 if (nb_elements < nb_threads) nb_threads = nb_elements;
1143
1144 // the result that we return is a vector of pairs (NodeId, Idx). For thread number i, the
1145 // pair at index i is the beginning of the range that the thread will have to process: this
1146 // is the part of the marginal distribution vector of node NodeId starting at index Idx.
1147 // The pair at index i+1 is the end of this range (not included)
1148 threadRanges_.clear();
1149 threadRanges_.reserve(nb_threads + 1);
1150
1151 // try to balance the number of elements among the threads
1152 Idx nb_elts_par_thread = nb_elements / nb_threads;
1153 Idx rest_elts = nb_elements - nb_elts_par_thread * nb_threads;
1154
1155 NodeId current_node = 0;
1156 Idx current_domain_index = 0;
1157 Size current_domain_size = this->marginalMin_[0].size();
1158 threadRanges_.emplace_back(current_node, current_domain_index);
1159
1160 for (Idx i = Idx(0); i < nb_threads; ++i) {
1161 // compute the end of the threads, assuming that the current node has a domain
1162 // sufficiently large
1163 current_domain_index += nb_elts_par_thread;
1164 if (rest_elts != Idx(0)) {
1165 ++current_domain_index;
1166 --rest_elts;
1167 }
1168
1169 // if the current node is not sufficient to hold all the elements that
1170 // the current thread should process. So we should add elements of the
1171 // next nodes
1172 while (current_domain_index >= current_domain_size) {
1173 current_domain_index -= current_domain_size;
1174 ++current_node;
// NOTE(review): this reset to 0 overwrites the remainder just computed on
// line 1173, which would unbalance the ranges — possibly an extraction
// artifact; verify against the upstream source.
1175 current_domain_index = 0;
1176 if (current_node != marginalMin_size) {
1177 current_domain_size = this->marginalMin_[current_node].size();
1178 }
1179 }
1180
1181 // now we can store the range if elements
1182 threadRanges_.emplace_back(current_node, current_domain_index);
1183
1184 // compute the next begin_node
1185 if (current_domain_index == current_domain_size) {
1186 ++current_node;
1187 current_domain_index = 0;
1188 }
1189 }
1190 }
1191
1192 template < typename GUM_SCALAR >
1194 const std::vector< GUM_SCALAR >& vals) {
1195 evidence_.insert(id, vals);
1196 // forces the computation of the begin iterator to avoid subsequent data races
1197 // @TODO make HashTableConstIterator constructors thread safe
1198 evidence_.begin();
1199 }
1200
1202 template < typename GUM_SCALAR >
1204 std::vector< GUM_SCALAR > vals(this->credalNet_->current_bn().variable(id).domainSize(), 0);
1205 vals[val] = 1;
1206 addEvidence(id, vals);
1207 }
1208
1210 template < typename GUM_SCALAR >
1211 void InferenceEngine< GUM_SCALAR >::addEvidence(const std::string& nodeName, const Idx val) {
1212 addEvidence(this->credalNet_->current_bn().idFromName(nodeName), val);
1213 }
1214
1216 template < typename GUM_SCALAR >
1217 void InferenceEngine< GUM_SCALAR >::addEvidence(NodeId id, const std::string& label) {
1218 addEvidence(id, this->credalNet_->current_bn().variable(id)[label]);
1219 }
1220
1222 template < typename GUM_SCALAR >
1223 void InferenceEngine< GUM_SCALAR >::addEvidence(const std::string& nodeName,
1224 const std::string& label) {
1225 const NodeId id = this->credalNet_->current_bn().idFromName(nodeName);
1226 addEvidence(id, this->credalNet_->current_bn().variable(id)[label]);
1227 }
1228
1229 template < typename GUM_SCALAR >
1230 void InferenceEngine< GUM_SCALAR >::addEvidence(const std::string& nodeName,
1231 const std::vector< GUM_SCALAR >& vals) {
1232 addEvidence(this->credalNet_->current_bn().idFromName(nodeName), vals);
1233 }
1234
1235 template < typename GUM_SCALAR >
1236 void InferenceEngine< GUM_SCALAR >::addEvidence(const Tensor< GUM_SCALAR >& pot) {
1237 const auto id = this->credalNet_->current_bn().idFromName(pot.variable(0).name());
1238 std::vector< GUM_SCALAR > vals(this->credalNet_->current_bn().variable(id).domainSize(), 0);
1239 Instantiation I(pot);
1240 for (I.setFirst(); !I.end(); I.inc()) {
1241 vals[I.val(0)] = pot[I];
1242 }
1243 addEvidence(id, vals);
1244 }
1245 } // namespace credal
1246} // namespace gum
ApproximationScheme(bool verbosity=false)
The class for generic Hash Tables.
Definition hashTable.h:637
Exception : input/output problem.
Class for assigning/browsing values to tuples of discrete variables.
bool end() const
Returns true if the Instantiation reached the end.
void inc()
Operator increment.
Idx val(Idx i) const
Returns the current value of the variable at position i.
void setFirst()
Assign the first values to the tuple of the Instantiation.
Exception: at least one argument passed to a function is not what was expected.
Exception : the element we looked for cannot be found.
Exception : operation not allowed.
Exception : out of bound.
aGrUM's Tensor is a multi-dimensional array with tensor operators.
Definition tensor.h:85
virtual Size getNumberOfThreads() const
returns the current max number of threads used by the class containing this ThreadNumberManager
Class template representing a Credal Network.
Definition credalNet.h:97
void updateExpectations_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex)
Given a node id and one of it's possible vertex obtained during inference, update this node lower and...
void repetitiveInit_()
Initialize t0_ and t1_ clusters.
void dynamicExpectations()
Compute dynamic expectations.
void saveVertices(const std::string &path) const
Saves vertices to file.
margi oldMarginalMax_
Old upper marginals used to compute epsilon.
const std::vector< GUM_SCALAR > & dynamicExpMin(const std::string &varName) const
Get the lower dynamic expectation of a given variable prefix (without the time step included,...
margi evidence_
Holds observed variables states.
virtual void insertEvidenceFile(const std::string &path)
Insert evidence from file.
cluster t1_
Clusters of nodes used with dynamic networks.
dynExpe dynamicExpMin_
Lower dynamic expectations.
bool storeBNOpt_
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
void initExpectations_()
Initialize lower and upper expectations before inference, with the lower expectation being initialize...
void updateCredalSets_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of it's possible vertex, update it's credal set.
bool repetitiveInd_
True if using repetitive independence ( dynamic network only ), False otherwise.
const NodeProperty< std::vector< NodeId > > & getT1Cluster() const
Get the t1_ cluster.
void displatchMarginalsToThreads_()
computes Vector threadRanges_, that assigns some part of marginalMin_ and marginalMax_ to the threads
void saveExpectations(const std::string &path) const
Saves expectations to file.
virtual const GUM_SCALAR computeEpsilon_()
Compute approximation scheme epsilon using the old marginals and the new ones.
void saveMarginals(const std::string &path) const
Saves marginals to file.
const std::vector< std::vector< GUM_SCALAR > > & vertices(const NodeId id) const
Get the vertice of a given node id.
InferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void initMarginalSets_()
Initialize credal set vertices with empty sets.
margi oldMarginalMin_
Old lower marginals used to compute epsilon.
bool storeVertices_
True if credal sets vertices are stored, False otherwise.
dynExpe dynamicExpMax_
Upper dynamic expectations.
const std::vector< GUM_SCALAR > & dynamicExpMax(const std::string &varName) const
Get the upper dynamic expectation of a given variable prefix (without the time step included,...
std::string toString() const
Print all nodes marginals to standart output.
void insertQuery(const NodeProperty< std::vector< bool > > &query)
Insert query variables and states from Property.
bool repetitiveInd() const
Get the current independence status.
NodeProperty< std::vector< bool > > query
void dynamicExpectations_()
Rearrange lower and upper expectations to suit dynamic networks.
bool storeVertices() const
Get the number of iterations without changes used to stop some algorithms.
const CredalNet< GUM_SCALAR > * credalNet_
A pointer to the Credal Net used.
void setRepetitiveInd(const bool repetitive)
virtual void addEvidence(NodeId id, const Idx val) final
adds a new hard evidence on node id
virtual void eraseAllEvidence()
removes all the evidence entered into the network
void insertModalsFile(const std::string &path)
Insert variables modalities from file to compute expectations.
expe expectationMax_
Upper expectations, if some variables modalities were inserted.
void insertEvidence(const std::map< std::string, std::vector< GUM_SCALAR > > &eviMap)
Insert evidence from map.
void insertModals(const std::map< std::string, std::vector< GUM_SCALAR > > &modals)
Insert variables modalities from map to compute expectations.
query query_
Holds the query nodes states.
void insertQueryFile(const std::string &path)
Insert query variables states from file.
credalSet marginalSets_
Credal sets vertices, if enabled.
Tensor< GUM_SCALAR > marginalMin(const NodeId id) const
Get the lower marginals of a given node id.
virtual ~InferenceEngine()
Destructor.
const CredalNet< GUM_SCALAR > & credalNet() const
Get this credal network.
VarMod2BNsMap< GUM_SCALAR > * getVarMod2BNsMap()
Get optimum IBayesNet.
margi marginalMin_
Lower marginals.
cluster t0_
Clusters of nodes used with dynamic networks.
const GUM_SCALAR & expectationMin(const NodeId id) const
Get the lower expectation of a given node id.
dynExpe modal_
Variables modalities used to compute expectations.
expe expectationMin_
Lower expectations, if some variables modalities were inserted.
Tensor< GUM_SCALAR > marginalMax(const NodeId id) const
Get the upper marginals of a given node id.
const GUM_SCALAR & expectationMax(const NodeId id) const
Get the upper expectation of a given node id.
int timeSteps_
The number of time steps of this network (only usefull for dynamic networks).
std::vector< std::pair< NodeId, Idx > > threadRanges_
the ranges of elements of marginalMin_ and marginalMax_ processed by each thread
const NodeProperty< std::vector< NodeId > > & getT0Cluster() const
Get the t0_ cluster.
void initMarginals_()
Initialize lower and upper old marginals and marginals before inference, with the lower marginal bein...
VarMod2BNsMap< GUM_SCALAR > dbnOpt_
Object used to efficiently store optimal bayes net during inference, for some algorithms.
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
Definition LrsWrapper.h:119
const matrix & getOutput() const
Get the output matrix solution of the problem.
void setUpV(const Size &card, const Size &vertices)
Sets up a V-representation.
void elimRedundVrep()
V-Redundancy elimination.
void fillV(const std::vector< GUM_SCALAR > &vertex)
Creates the V-representation of a polytope by adding a vertex to the problem input _input_.
#define GUM_ERROR(type, msg)
Definition exceptions.h:72
#define GUM_SHOWERROR(e)
Definition exceptions.h:85
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition types.h:74
Size Idx
Type for indexes.
Definition types.h:79
Size NodeId
Type for node ids.
HashTable< NodeId, VAL > NodeProperty
Property on graph elements.
Abstract class representing CredalNet inference engines.
namespace for all credal networks entities
Definition agrum.h:61
gum is the global namespace for all aGrUM entities
Definition agrum.h:46
STL namespace.
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running