47 template <
typename GUM_SCALAR >
49 std::string path_name = path.substr(0, path.size() - 4);
50 path_name = path_name +
".res";
52 std::ofstream res(path_name.c_str(), std::ios::out | std::ios::trunc);
56 "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
57 "string & path) : could not open file : "
62 if (std::string ext = path.substr(path.size() - 3, path.size());
63 std::strcmp(ext.c_str(),
"evi") == 0) {
64 std::ifstream evi(path.c_str(), std::ios::in);
69 "CNLoopyPropagation<GUM_SCALAR>::saveInference(std::"
70 "string & path) : could not open file : "
85 for (
auto node:
_bnet_->nodes()) {
87 GUM_SCALAR msg_p_min = 1.0;
88 GUM_SCALAR msg_p_max = 0.0;
98 msg_p_max = msg_p_min;
121 if (min ==
INF_ && lmin == 0.) {
122 std::cout <<
"proba ERR (negatif) : pi = inf, l = 0" << std::endl;
126 msg_p_min = GUM_SCALAR(1.);
127 }
else if (min == 0. || lmin == 0.) {
128 msg_p_min = GUM_SCALAR(0.);
130 msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
134 if (max ==
INF_ && lmax == 0.) {
135 std::cout <<
"proba ERR (negatif) : pi = inf, l = 0" << std::endl;
139 msg_p_max = GUM_SCALAR(1.);
140 }
else if (max == 0. || lmax == 0.) {
141 msg_p_max = GUM_SCALAR(0.);
143 msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
147 if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) { msg_p_min = msg_p_max; }
149 if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) { msg_p_max = msg_p_min; }
151 if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
152 std::cout << std::endl;
153 std::cout <<
"pas de proba calculable (verifier observations)" << std::endl;
156 res <<
"P(" <<
_bnet_->variable(node).name() <<
" | e) = ";
159 res <<
"(observe)" << std::endl;
164 res <<
"\t\t" <<
_bnet_->variable(node).label(0) <<
" [ " << (GUM_SCALAR)1. - msg_p_max;
166 if (msg_p_min != msg_p_max) {
167 res <<
", " << (GUM_SCALAR)1. - msg_p_min <<
" ] | ";
172 res <<
_bnet_->variable(node).label(1) <<
" [ " << msg_p_min;
174 if (msg_p_min != msg_p_max) {
175 res <<
", " << msg_p_max <<
" ]" << std::endl;
177 res <<
" ]" << std::endl;
193 template <
typename GUM_SCALAR >
195 GUM_SCALAR& msg_l_max,
196 std::vector< GUM_SCALAR >& lx,
200 GUM_SCALAR& den_max) {
201 GUM_SCALAR num_min_tmp = 1.;
202 GUM_SCALAR den_min_tmp = 1.;
203 GUM_SCALAR num_max_tmp = 1.;
204 GUM_SCALAR den_max_tmp = 1.;
206 GUM_SCALAR res_min = 1.0;
207 GUM_SCALAR res_max = 0.0;
209 auto lsize = lx.size();
211 for (
decltype(lsize) i = 0; i < lsize; i++) {
212 bool non_defini_min =
false;
213 bool non_defini_max =
false;
216 num_min_tmp = num_min;
217 den_min_tmp = den_max;
218 num_max_tmp = num_max;
219 den_max_tmp = den_min;
220 }
else if (lx[i] == (GUM_SCALAR)1.) {
221 num_min_tmp = GUM_SCALAR(1.);
222 den_min_tmp = GUM_SCALAR(1.);
223 num_max_tmp = GUM_SCALAR(1.);
224 den_max_tmp = GUM_SCALAR(1.);
225 }
else if (lx[i] > (GUM_SCALAR)1.) {
226 GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
227 num_min_tmp = num_min + li;
228 den_min_tmp = den_max + li;
229 num_max_tmp = num_max + li;
230 den_max_tmp = den_min + li;
231 }
else if (lx[i] < (GUM_SCALAR)1.) {
232 GUM_SCALAR li = GUM_SCALAR(1.) / (lx[i] - GUM_SCALAR(1.));
233 num_min_tmp = num_max + li;
234 den_min_tmp = den_min + li;
235 num_max_tmp = num_min + li;
236 den_max_tmp = den_max + li;
239 if (den_min_tmp == 0. && num_min_tmp == 0.) {
240 non_defini_min =
true;
241 }
else if (den_min_tmp == 0. && num_min_tmp != 0.) {
243 }
else if (den_min_tmp !=
INF_ || num_min_tmp !=
INF_) {
244 res_min = num_min_tmp / den_min_tmp;
247 if (den_max_tmp == 0. && num_max_tmp == 0.) {
248 non_defini_max =
true;
249 }
else if (den_max_tmp == 0. && num_max_tmp != 0.) {
251 }
else if (den_max_tmp !=
INF_ || num_max_tmp !=
INF_) {
252 res_max = num_max_tmp / den_max_tmp;
255 if (non_defini_max && non_defini_min) {
256 std::cout <<
"undefined msg" << std::endl;
258 }
else if (non_defini_min && !non_defini_max) {
260 }
else if (non_defini_max && !non_defini_min) {
264 if (res_min < 0.) { res_min = 0.; }
266 if (res_max < 0.) { res_max = 0.; }
268 if (msg_l_min == msg_l_max && msg_l_min == -2.) {
273 if (res_max > msg_l_max) { msg_l_max = res_max; }
275 if (res_min < msg_l_min) { msg_l_min = res_min; }
283 template <
typename GUM_SCALAR >
285 std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
287 GUM_SCALAR& msg_l_min,
288 GUM_SCALAR& msg_l_max,
289 std::vector< GUM_SCALAR >& lx,
291 GUM_SCALAR num_min = 0.;
292 GUM_SCALAR num_max = 0.;
293 GUM_SCALAR den_min = 0.;
294 GUM_SCALAR den_max = 0.;
296 auto taille = combi_msg_p.size();
298 std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
300 for (
decltype(taille) i = 0; i < taille; i++) {
301 it[i] = combi_msg_p[i].begin();
310 while (it[taille - 1] != combi_msg_p[taille - 1].end()) {
311 GUM_SCALAR prod = 1.;
313 for (
decltype(taille) k = 0; k < taille; k++) {
317 den_min += (
_cn_->get_binaryCPT_min()[
id][combi_den] * prod);
318 den_max += (
_cn_->get_binaryCPT_max()[
id][combi_den] * prod);
320 num_min += (
_cn_->get_binaryCPT_min()[
id][combi_num] * prod);
321 num_max += (
_cn_->get_binaryCPT_max()[
id][combi_num] * prod);
327 if (combi_den % pp == 0) {
336 for (
decltype(taille) i = 0; (i < taille - 1) && (it[i] == combi_msg_p[i].end()); ++i) {
337 it[i] = combi_msg_p[i].begin();
342 compute_ext_(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
349 template <
typename GUM_SCALAR >
351 std::vector< std::vector< GUM_SCALAR > >& combi_msg_p,
353 GUM_SCALAR& msg_p_min,
354 GUM_SCALAR& msg_p_max) {
358 auto taille = combi_msg_p.size();
360 std::vector< typename std::vector< GUM_SCALAR >::iterator > it(taille);
362 for (
decltype(taille) i = 0; i < taille; i++) {
363 it[i] = combi_msg_p[i].begin();
367 auto theEnd = combi_msg_p[taille - 1].end();
369 while (it[taille - 1] != theEnd) {
370 GUM_SCALAR prod = 1.;
372 for (
decltype(taille) k = 0; k < taille; k++) {
376 min += (
_cn_->get_binaryCPT_min()[
id][combi] * prod);
377 max += (
_cn_->get_binaryCPT_max()[
id][combi] * prod);
384 for (
decltype(taille) i = 0; (i < taille - 1) && (it[i] == combi_msg_p[i].end()); ++i) {
385 it[i] = combi_msg_p[i].begin();
390 if (min < msg_p_min) { msg_p_min = min; }
392 if (max > msg_p_max) { msg_p_max = max; }
398 template <
typename GUM_SCALAR >
400 std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
402 GUM_SCALAR& msg_p_min,
403 GUM_SCALAR& msg_p_max) {
404 auto taille = msgs_p.size();
408 msg_p_min =
_cn_->get_binaryCPT_min()[id][0];
409 msg_p_max =
_cn_->get_binaryCPT_max()[id][0];
414 for (
Size i = 0; i < taille; i++) {
415 msgPerm *= msgs_p[i].size();
424 if (nb_threads < 1) nb_threads = 1;
427 const auto real_nb_threads = ranges.size();
428 std::vector< GUM_SCALAR > msg_pmin(real_nb_threads, msg_p_min);
429 std::vector< GUM_SCALAR > msg_pmax(real_nb_threads, msg_p_max);
433 = [
this, &msg_pmin, &msg_pmax, msgs_p, taille, ranges, id](
const std::size_t this_thread,
434 const std::size_t nb_threads) {
435 std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
437 const auto& [first, second] = ranges[this_thread];
438 for (
Idx j = first; j < second; ++j) {
442 for (
Idx i = 0; i < taille; i++) {
443 if (msgs_p[i].size() == 2) {
444 combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
447 combi_msg_p[i] = msgs_p[i][0];
451 compute_ext_(combi_msg_p,
id, msg_pmin[this_thread], msg_pmax[this_thread]);
458 for (
Idx j = 0; j < real_nb_threads; ++j) {
459 if (msg_p_min > msg_pmin[j]) { msg_p_min = msg_pmin[j]; }
460 if (msg_p_max < msg_pmax[j]) { msg_p_max = msg_pmax[j]; }
468 template <
typename GUM_SCALAR >
470 std::vector< std::vector< std::vector< GUM_SCALAR > > >& msgs_p,
472 GUM_SCALAR& real_msg_l_min,
473 GUM_SCALAR& real_msg_l_max,
474 std::vector< GUM_SCALAR >& lx,
476 GUM_SCALAR msg_l_min = real_msg_l_min;
477 GUM_SCALAR msg_l_max = real_msg_l_max;
479 auto taille = msgs_p.size();
483 GUM_SCALAR num_min =
_cn_->get_binaryCPT_min()[id][1];
484 GUM_SCALAR num_max =
_cn_->get_binaryCPT_max()[id][1];
485 GUM_SCALAR den_min =
_cn_->get_binaryCPT_min()[id][0];
486 GUM_SCALAR den_max =
_cn_->get_binaryCPT_max()[id][0];
488 compute_ext_(msg_l_min, msg_l_max, lx, num_min, num_max, den_min, den_max);
490 real_msg_l_min = msg_l_min;
491 real_msg_l_max = msg_l_max;
496 for (
Size i = 0; i < taille; i++) {
497 msgPerm *= msgs_p[i].size();
506 if (nb_threads < 1) nb_threads = 1;
509 const auto real_nb_threads = ranges.size();
510 std::vector< GUM_SCALAR > msg_lmin(real_nb_threads, msg_l_min);
511 std::vector< GUM_SCALAR > msg_lmax(real_nb_threads, msg_l_max);
514 auto threadedExec = [
this, &msg_lmin, &msg_lmax, msgs_p, taille, ranges, id, &lx, pos](
515 const std::size_t this_thread,
516 const std::size_t nb_threads) {
517 std::vector< std::vector< GUM_SCALAR > > combi_msg_p(taille);
519 const auto& [first, second] = ranges[this_thread];
520 for (
Idx j = first; j < second; ++j) {
524 for (
Idx i = 0; i < taille; i++) {
525 if (msgs_p[i].size() == 2) {
526 combi_msg_p[i] = (jvalue & 1) ? msgs_p[i][1] : msgs_p[i][0];
529 combi_msg_p[i] = msgs_p[i][0];
532 compute_ext_(combi_msg_p,
id, msg_lmin[this_thread], msg_lmax[this_thread], lx, pos);
539 for (
Idx j = 0; j < real_nb_threads; ++j) {
540 if ((msg_l_min > msg_lmin[j] || msg_l_min == -2) && msg_lmin[j] > 0) {
541 msg_l_min = msg_lmin[j];
543 if ((msg_l_max < msg_lmax[j] || msg_l_max == -2) && msg_lmax[j] > 0) {
544 msg_l_max = msg_lmax[j];
548 real_msg_l_min = msg_l_min;
549 real_msg_l_max = msg_l_max;
552 template <
typename GUM_SCALAR >
576 template <
typename GUM_SCALAR >
592 for (
auto node:
_bnet_->nodes()) {
605 template <
typename GUM_SCALAR >
610 for (
auto node:
_bnet_->topologicalOrder()) {
634 std::vector< GUM_SCALAR > marg(2);
636 marg[0] = 1 - marg[1];
663 const auto parents = &
_bnet_->cpt(node).variablesSequence();
665 std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
666 std::vector< std::vector< GUM_SCALAR > > msg_p;
667 std::vector< GUM_SCALAR > distri(2);
671 for (
auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd; ++jt) {
676 distri[0] = (GUM_SCALAR)1. - distri[1];
677 msg_p.push_back(distri);
681 distri[0] = (GUM_SCALAR)1. - distri[1];
682 msg_p.push_back(distri);
685 msgs_p.push_back(msg_p);
689 GUM_SCALAR msg_p_min = 1.;
690 GUM_SCALAR msg_p_max = 0.;
696 if (msg_p_min <= (GUM_SCALAR)0.) { msg_p_min = (GUM_SCALAR)0.; }
698 if (msg_p_max <= (GUM_SCALAR)0.) { msg_p_max = (GUM_SCALAR)0.; }
701 std::vector< GUM_SCALAR > marg(2);
703 marg[0] = 1 - msg_p_min;
707 if (msg_p_min != msg_p_max) {
709 marg[0] = 1 - msg_p_max;
718 for (
auto arc:
_bnet_->arcs()) {
727 template <
typename GUM_SCALAR >
737 for (
auto chil: graphe.
children(node)) {
743 for (
auto par: graphe.
parents(node)) {
765 template <
typename GUM_SCALAR >
769 std::vector< cArcP > seq;
770 seq.reserve(nbrArcs);
772 for (
const auto& arc:
_bnet_->arcs()) {
781 for (
Size j = 0, theEnd = nbrArcs / 2; j < theEnd; j++) {
785 if (w1 == w2) {
continue; }
787 std::swap(seq[w1], seq[w2]);
790 for (
const auto it: seq) {
796 msgP_(it->tail(), it->head());
797 msgL_(it->head(), it->tail());
810 template <
typename GUM_SCALAR >
814 std::vector< cArcP > seq;
815 seq.reserve(nbrArcs);
817 for (
const auto& arc:
_bnet_->arcs()) {
826 for (
const auto it: seq) {
832 msgP_(it->tail(), it->head());
833 msgL_(it->head(), it->tail());
843 template <
typename GUM_SCALAR >
848 const auto parents = &
_bnet_->cpt(Y).variablesSequence();
857 if (!update_p && !update_l) {
return; }
870 GUM_SCALAR lmin = 1.;
871 GUM_SCALAR lmax = 1.;
873 for (
auto chil: children) {
885 if (lmax != lmax && lmin == lmin) { lmax = lmin; }
887 if (lmax != lmax && lmin != lmin) {
888 std::cout <<
"no likelihood defined [lmin, lmax] (incompatibles "
893 if (lmin < 0.) { lmin = 0.; }
895 if (lmax < 0.) { lmax = 0.; }
924 if (lmin == lmax && lmin == 1.) {
936 if (update_p || update_l) {
937 std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
938 std::vector< std::vector< GUM_SCALAR > > msg_p;
939 std::vector< GUM_SCALAR > distri(2);
945 for (
auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd; ++jt) {
946 if (
_bnet_->nodeId(**jt) == X) {
948 pos = parents->pos(*jt) - 1;
955 distri[0] = GUM_SCALAR(1.) - distri[1];
956 msg_p.push_back(distri);
960 distri[0] = GUM_SCALAR(1.) - distri[1];
961 msg_p.push_back(distri);
964 msgs_p.push_back(msg_p);
968 GUM_SCALAR min = -2.;
969 GUM_SCALAR max = -2.;
971 std::vector< GUM_SCALAR > lx;
974 if (lmin != lmax) { lx.push_back(lmax); }
978 if (min == -2. || max == -2.) {
981 }
else if (max != -2.) {
984 std::cout << std::endl;
985 std::cout <<
"!!!! pas de message L calculable !!!!" << std::endl;
990 if (min < 0.) { min = 0.; }
992 if (max < 0.) { max = 0.; }
1026 template <
typename GUM_SCALAR >
1030 const auto parents = &
_bnet_->cpt(X).variablesSequence();
1050 if (!update_p && !update_l) {
return; }
1052 GUM_SCALAR lmin = 1.;
1053 GUM_SCALAR lmax = 1.;
1056 for (
auto chil: children) {
1057 if (chil == demanding_child) {
continue; }
1068 if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1070 if (lmax != lmax && lmin == lmin) { lmax = lmin; }
1072 if (lmax != lmax && lmin != lmin) {
1073 std::cout <<
"pas de vraisemblance definie [lmin, lmax] (observations "
1079 if (lmin < 0.) { lmin = 0.; }
1081 if (lmax < 0.) { lmax = 0.; }
1084 GUM_SCALAR min =
INF_;
1085 GUM_SCALAR max = 0.;
1088 std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1089 std::vector< std::vector< GUM_SCALAR > > msg_p;
1090 std::vector< GUM_SCALAR > distri(2);
1094 for (
auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd; ++jt) {
1099 distri[0] = GUM_SCALAR(1.) - distri[1];
1100 msg_p.push_back(distri);
1104 distri[0] = GUM_SCALAR(1.) - distri[1];
1105 msg_p.push_back(distri);
1108 msgs_p.push_back(msg_p);
1114 if (min < 0.) { min = 0.; }
1116 if (max < 0.) { max = 0.; }
1119 std::cout <<
" ERREUR msg P min = max = INF " << std::endl;
1145 if (update_p || update_l) {
1146 GUM_SCALAR msg_p_min;
1147 GUM_SCALAR msg_p_max;
1150 if (min ==
INF_ && lmin == 0.) {
1151 std::cout <<
"MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1155 msg_p_min = GUM_SCALAR(1.);
1156 }
else if (min == 0. || lmin == 0.) {
1159 msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1163 if (max ==
INF_ && lmax == 0.) {
1164 std::cout <<
"MESSAGE P ERR (negatif) : pi = inf, l = 0" << std::endl;
1168 msg_p_max = GUM_SCALAR(1.);
1169 }
else if (max == 0. || lmax == 0.) {
1172 msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1175 if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1176 msg_p_min = msg_p_max;
1177 std::cout << std::endl;
1178 std::cout <<
"msg_p_min is NaN" << std::endl;
1181 if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1182 msg_p_max = msg_p_min;
1183 std::cout << std::endl;
1184 std::cout <<
"msg_p_max is NaN" << std::endl;
1187 if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1188 std::cout << std::endl;
1189 std::cout <<
"pas de message P calculable (verifier observations)" << std::endl;
1193 if (msg_p_min < 0.) { msg_p_min = 0.; }
1195 if (msg_p_max < 0.) { msg_p_max = 0.; }
1197 bool update =
false;
1206 if (msg_p_max != msg_p_min) {
1215 if (msg_p_max != msg_p_min) {
1229 template <
typename GUM_SCALAR >
1231 for (
auto node:
_bnet_->nodes()) {
1239 auto parents = &
_bnet_->cpt(node).variablesSequence();
1242 GUM_SCALAR lmin = 1.;
1243 GUM_SCALAR lmax = 1.;
1246 for (
auto chil: children) {
1256 if (lmin != lmin && lmax == lmax) { lmin = lmax; }
1260 if (lmax != lmax && lmin != lmin) {
1261 std::cout <<
"pas de vraisemblance definie [lmin, lmax] (observations "
1267 if (lmin < 0.) { lmin = 0.; }
1269 if (lmax < 0.) { lmax = 0.; }
1284 std::vector< std::vector< std::vector< GUM_SCALAR > > > msgs_p;
1285 std::vector< std::vector< GUM_SCALAR > > msg_p;
1286 std::vector< GUM_SCALAR > distri(2);
1290 for (
auto jt = ++parents->begin(), theEnd = parents->end(); jt != theEnd; ++jt) {
1295 distri[0] = GUM_SCALAR(1.) - distri[1];
1296 msg_p.push_back(distri);
1300 distri[0] = GUM_SCALAR(1.) - distri[1];
1301 msg_p.push_back(distri);
1304 msgs_p.push_back(msg_p);
1308 GUM_SCALAR min =
INF_;
1309 GUM_SCALAR max = 0.;
1313 if (min < 0.) { min = 0.; }
1315 if (max < 0.) { max = 0.; }
1332 template <
typename GUM_SCALAR >
1334 for (
auto node:
_bnet_->nodes()) {
1335 GUM_SCALAR msg_p_min = 1.;
1336 GUM_SCALAR msg_p_max = 0.;
1340 msg_p_min = (GUM_SCALAR)0.;
1345 msg_p_max = msg_p_min;
1365 std::cout <<
" min ou max === INF_ !!!!!!!!!!!!!!!!!!!!!!!!!! " << std::endl;
1369 if (min ==
INF_ && lmin == 0.) {
1370 std::cout <<
"proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1375 msg_p_min = GUM_SCALAR(1.);
1376 }
else if (min == 0. || lmin == 0.) {
1377 msg_p_min = GUM_SCALAR(0.);
1379 msg_p_min = GUM_SCALAR(1. / (1. + ((1. / min - 1.) * 1. / lmin)));
1382 if (max ==
INF_ && lmax == 0.) {
1383 std::cout <<
"proba ERR (negatif) : pi = inf, l = 0" << std::endl;
1388 msg_p_max = GUM_SCALAR(1.);
1389 }
else if (max == 0. || lmax == 0.) {
1390 msg_p_max = GUM_SCALAR(0.);
1392 msg_p_max = GUM_SCALAR(1. / (1. + ((1. / max - 1.) * 1. / lmax)));
1396 if (msg_p_min != msg_p_min && msg_p_max == msg_p_max) {
1397 msg_p_min = msg_p_max;
1398 std::cout << std::endl;
1399 std::cout <<
"msg_p_min is NaN" << std::endl;
1402 if (msg_p_max != msg_p_max && msg_p_min == msg_p_min) {
1403 msg_p_max = msg_p_min;
1404 std::cout << std::endl;
1405 std::cout <<
"msg_p_max is NaN" << std::endl;
1408 if (msg_p_max != msg_p_max && msg_p_min != msg_p_min) {
1409 std::cout << std::endl;
1410 std::cout <<
"Please check the observations (no proba can be computed)" << std::endl;
1414 if (msg_p_min < 0.) { msg_p_min = 0.; }
1416 if (msg_p_max < 0.) { msg_p_max = 0.; }
1425 template <
typename GUM_SCALAR >
1433 template <
typename GUM_SCALAR >
1435 for (
auto node:
_bnet_->nodes()) {
1438 for (
auto pare:
_bnet_->parents(node)) {
1447 template <
typename GUM_SCALAR >
1451 std::vector< std::vector< GUM_SCALAR > >
vertices(2, std::vector< GUM_SCALAR >(2));
1453 for (
auto node:
_bnet_->nodes()) {
1460 for (
auto vertex = 0, vend = 2; vertex != vend; vertex++) {
1471 template <
typename GUM_SCALAR >
1474 if (!
credalNet.isSeparatelySpecified()) {
1475 GUM_ERROR(OperationNotAllowed,
1476 "CNLoopyPropagation is only available "
1477 "with separately specified nets");
1481 for (
auto node:
credalNet.current_bn().nodes())
1482 if (
credalNet.current_bn().variable(node).domainSize() != 2) {
1483 GUM_ERROR(OperationNotAllowed,
1484 "CNLoopyPropagation is only available "
1485 "with binary credal networks")
1489 if (!
credalNet.hasComputedBinaryCPTMinMax()) {
1490 GUM_ERROR(OperationNotAllowed,
1491 "CNLoopyPropagation only works when "
1492 "\"computeBinaryCPTMinMax()\" has been called for "
1505 template <
typename GUM_SCALAR >
1510 for (
auto node:
_bnet_->nodes()) {
1518 template <
typename GUM_SCALAR >
1523 template <
typename GUM_SCALAR >
Class implementing loopy-propagation with binary networks - L2U algorithm.
void updateApproximationScheme(unsigned int incr=1)
Update the scheme w.r.t the new error and increment steps.
void initApproximationScheme()
Initialise the scheme.
void stopApproximationScheme()
Stop the approximation scheme.
bool continueApproximationScheme(double error)
Update the scheme w.r.t the new error.
const NodeSet & parents(NodeId id) const
returns the set of nodes with an arc ingoing to a given node
NodeSet children(const NodeSet &ids) const
returns the set of children of a set of nodes
The base class for all directed edges.
Exception : the element we looked for cannot be found.
Exception : operation not allowed.
Size size() const noexcept
Returns the number of elements in the set.
bool empty() const noexcept
Indicates whether the set is the empty set.
NodeProperty< GUM_SCALAR > NodesL_min_
"Lower" node information obtained by combination of children messages.
NodeProperty< GUM_SCALAR > NodesP_min_
"Lower" node information obtained by combination of parent's messages.
NodeProperty< GUM_SCALAR > NodesL_max_
"Upper" node information obtained by combinaison of children messages.
void msgL_(const NodeId X, const NodeId demanding_parent)
Sends a message to one's parent, i.e.
NodeProperty< NodeSet * > msg_l_sent_
Used to keep track of one's messages sent to its parents.
InferenceType _inferenceType_
The chosen inference type.
void compute_ext_(GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, GUM_SCALAR &num_min, GUM_SCALAR &num_max, GUM_SCALAR &den_min, GUM_SCALAR &den_max)
Used by msgL_.
NodeProperty< bool > update_p_
Used to keep track of which node needs to update its information coming from its parents.
void refreshLMsPIs_(bool refreshIndic=false)
Get the last messages from one's parents and children.
NodeProperty< bool > update_l_
Used to keep track of which node needs to update its information coming from its children.
void makeInferenceNodeToNeighbours_()
Starts the inference with this inference type.
void initialize_()
Topological forward propagation to initialize old marginals & messages.
GUM_SCALAR calculateEpsilon_()
Compute epsilon.
void makeInferenceByRandomOrder_()
Starts the inference with this inference type.
const IBayesNet< GUM_SCALAR > * _bnet_
A pointer to it's IBayesNet used as a DAG.
ArcProperty< GUM_SCALAR > ArcsP_min_
"Lower" information coming from one's parent.
virtual ~CNLoopyPropagation()
Destructor.
InferenceType
Inference type to be used by the algorithm.
@ nodeToNeighbours
Uses a node-set so we don't iterate on nodes that can't send a new message.
@ randomOrder
Chooses a random arc ordering and sends messages accordingly.
@ ordered
Chooses an arc ordering and sends messages accordingly at all steps.
void msgP_(const NodeId X, const NodeId demanding_child)
Sends a message to one's child, i.e.
ArcProperty< GUM_SCALAR > ArcsL_max_
"Upper" information coming from one's children.
bool InferenceUpToDate_
TRUE if inference has already been performed, FALSE otherwise.
void updateMarginals_()
Compute marginals from up-to-date messages.
const CredalNet< GUM_SCALAR > * _cn_
A pointer to the CredalNet to be used.
void computeExpectations_()
Since the network is binary, expectations can be computed from the final marginals which give us the ...
NodeProperty< GUM_SCALAR > NodesP_max_
"Upper" node information obtained by combinaison of parent's messages.
void enum_combi_(std::vector< std::vector< std::vector< GUM_SCALAR > > > &msgs_p, const NodeId &id, GUM_SCALAR &msg_l_min, GUM_SCALAR &msg_l_max, std::vector< GUM_SCALAR > &lx, const Idx &pos)
Used by msgL_.
void makeInference()
Starts the inference.
InferenceType inferenceType()
Get the inference type.
void saveInference(const std::string &path)
void makeInferenceByOrderedArcs_()
Starts the inference with this inference type.
void eraseAllEvidence()
Erase all inference related data to perform another one.
void updateIndicatrices_()
Only update indicatrices variables at the end of computations ( calls msgP_ ).
NodeSet active_nodes_set
The current node-set to iterate through at this current step.
NodeSet next_active_nodes_set
The next node-set, i.e.
CNLoopyPropagation(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
ArcProperty< GUM_SCALAR > ArcsL_min_
"Lower" information coming from one's children.
ArcProperty< GUM_SCALAR > ArcsP_max_
"Upper" information coming from one's parent.
Class template representing a Credal Network.
void updateExpectations_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex)
Given a node id and one of its possible vertices obtained during inference, update this node's lower and...
margi oldMarginalMax_
Old upper marginals used to compute epsilon.
margi evidence_
Holds observed variables states.
margi marginalMax_
Upper marginals.
void updateCredalSets_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of its possible vertices, update its credal set.
virtual const GUM_SCALAR computeEpsilon_()
Compute approximation scheme epsilon using the old marginals and the new ones.
const std::vector< std::vector< GUM_SCALAR > > & vertices(const NodeId id) const
Get the vertices of a given node id.
InferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
margi oldMarginalMin_
Old lower marginals used to compute epsilon.
virtual void eraseAllEvidence()
removes all the evidence entered into the network
const CredalNet< GUM_SCALAR > & credalNet() const
Get this credal network.
margi marginalMin_
Lower marginals.
dynExpe modal_
Variables modalities used to compute expectations.
#define GUM_ERROR(type, msg)
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Size Idx
Type for indexes.
Size NodeId
Type for node ids.
Set< NodeId > NodeSet
Some typedefs and defines for shortcuts ...
Idx randomValue(const Size max=2)
Returns a random Idx between 0 and max-1 included.
namespace for all credal networks entities
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
unsigned int getNumberOfThreads()
returns the max number of threads used by default when entering the next parallel region
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running