52 template <
typename GUM_SCALAR >
63 template <
typename GUM_SCALAR >
71 if (a != b || a != c )
73 "addVariable : not the same id over all networks : " << a <<
", " << b <<
", "
79 template <
typename GUM_SCALAR >
86 template <
typename GUM_SCALAR >
89 const std::vector< std::vector< std::vector< GUM_SCALAR > > >& cpt) {
90 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(
id));
92 auto var_dSize =
_src_bn_.variable(
id).domainSize();
93 auto entry_size = tensor->domainSize() / var_dSize;
95 if (cpt.size() != entry_size)
97 "setCPTs : entry sizes of cpts does not match for node id : "
98 <<
id <<
" : " << cpt.size() <<
" != " << entry_size);
100 for (
const auto& cset: cpt) {
101 if (cset.size() == 0)
103 "setCPTs : vertices in credal set does not match for node id : "
104 <<
id <<
" with 0 vertices");
106 for (
const auto& vertex: cset) {
107 if (vertex.size() != var_dSize)
109 "setCPTs : variable modalities in cpts does "
110 "not match for node id : "
111 <<
id <<
" with vertex " << vertex <<
" : " << vertex.size()
112 <<
" != " << var_dSize);
116 for (
const auto& prob: vertex) {
120 if (std::fabs(sum - 1) > 1e-6)
122 "setCPTs : a vertex coordinates does not "
123 "sum to one for node id : "
124 <<
id <<
" with vertex " << vertex);
131 template <
typename GUM_SCALAR >
134 const std::vector< std::vector< GUM_SCALAR > >& cpt) {
135 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(
id));
137 auto var_dSize =
_src_bn_.variable(
id).domainSize();
138 auto entry_size = tensor->domainSize() / var_dSize;
140 if (entry >= entry_size)
142 "setCPT : entry is greater or equal than entry size "
143 "(entries start at 0 up to entry_size - 1) : "
144 << entry <<
" >= " << entry_size);
146 if (cpt.size() == 0)
GUM_ERROR(
SizeError,
"setCPT : empty credal set for entry : " << entry)
148 for (
const auto& vertex: cpt) {
149 if (vertex.size() != var_dSize)
151 "setCPT : variable modalities in cpts does not "
152 "match for node id : "
153 <<
id <<
" with vertex " << vertex <<
" at entry " << entry <<
" : "
154 << vertex.size() <<
" != " << var_dSize);
158 for (
const auto& prob: vertex) {
162 if (std::fabs(sum - 1) > 1e-6)
164 "setCPT : a vertex coordinates does not sum to one for node id : "
165 <<
id <<
" at entry " << entry <<
" with vertex " << vertex);
171 std::vector< std::vector< std::vector< GUM_SCALAR > > >(entry_size));
173 if (node_cpt[entry].size() != 0)
175 "setCPT : vertices of entry id " << entry
176 <<
" already set to : " << node_cpt[entry]
177 <<
", cannot insert : " << cpt);
179 node_cpt[entry] = cpt;
184 template <
typename GUM_SCALAR >
187 const std::vector< std::vector< GUM_SCALAR > >& cpt) {
188 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(
id));
190 auto var_dSize =
_src_bn_.variable(
id).domainSize();
191 auto entry_size = tensor->domainSize() / var_dSize;
212 "setCPT : instantiation : "
213 << ins <<
" is not valid for node id " <<
id
214 <<
" which accepts instantiations such as (order is not "
219 Idx entry = 0, jump = 1;
221 for (
Idx i = 0, end = ins.
nbrDim(); i < end; i++) {
224 entry += ins.
val(i) * jump;
229 if (entry >= entry_size)
231 "setCPT : entry is greater or equal than entry size "
232 "(entries start at 0 up to entry_size - 1) : "
233 << entry <<
" >= " << entry_size);
235 if (cpt.size() == 0)
GUM_ERROR(
SizeError,
"setCPT : empty credal set for entry : " << entry)
237 for (
const auto& vertex: cpt) {
238 if (vertex.size() != var_dSize)
240 "setCPT : variable modalities in cpts does not "
241 "match for node id : "
242 <<
id <<
" with vertex " << vertex <<
" at entry " << entry <<
" : "
243 << vertex.size() <<
" != " << var_dSize);
247 for (
const auto& prob: vertex) {
251 if (std::fabs(sum - 1) > 1e-6)
253 "setCPT : a vertex coordinates does not sum to one for node id : "
254 <<
id <<
" at entry " << entry <<
" with vertex " << vertex);
259 std::vector< std::vector< std::vector< GUM_SCALAR > > >(entry_size));
261 if (node_cpt[entry].size() != 0)
263 "setCPT : vertices of entry : " << ins <<
" id " << entry
264 <<
" already set to : " << node_cpt[entry]
265 <<
", cannot insert : " << cpt);
267 node_cpt[entry] = cpt;
272 template <
typename GUM_SCALAR >
274 const std::vector< GUM_SCALAR >& lower,
275 const std::vector< GUM_SCALAR >& upper) {
281 "fillConstraints : sizes does not match in fillWith for node id : " <<
id);
285 template <
typename GUM_SCALAR >
288 const std::vector< GUM_SCALAR >& lower,
289 const std::vector< GUM_SCALAR >& upper) {
290 Tensor< GUM_SCALAR >*
const tensor_min(
291 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_min_.cpt(
id)));
292 Tensor< GUM_SCALAR >*
const tensor_max(
293 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_max_.cpt(
id)));
295 auto var_dSize =
_src_bn_.variable(
id).domainSize();
297 if (lower.size() != var_dSize || upper.size() != var_dSize)
299 "setCPT : variable modalities in cpts does not match for node id : "
300 <<
id <<
" with sizes of constraints : ( " << lower.size() <<
" || "
301 << upper.size() <<
" ) != " << var_dSize);
303 auto entry_size = tensor_min->domainSize() / var_dSize;
305 if (entry >= entry_size)
307 "setCPT : entry is greater or equal than entry size "
308 "(entries start at 0 up to entry_size - 1) : "
309 << entry <<
" >= " << entry_size);
318 while (pos != entry) {
324 for (
Size i = 0; i < var_dSize; i++) {
325 tensor_min->set(min, lower[i]);
326 tensor_max->set(max, upper[i]);
332 template <
typename GUM_SCALAR >
335 const std::vector< GUM_SCALAR >& lower,
336 const std::vector< GUM_SCALAR >& upper) {
337 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(
id));
361 "setCPT : instantiation : "
362 << ins <<
" is not valid for node id " <<
id
363 <<
" which accepts instantiations such as (order is not "
368 Idx entry = 0, jump = 1;
370 for (
Idx i = 0, end = ins.
nbrDim(); i < end; i++) {
373 entry += ins.
val(i) * jump;
399 template <
typename GUM_SCALAR >
404 template <
typename GUM_SCALAR >
406 return _src_bn_.variable(
id).domainSize();
411 template <
typename GUM_SCALAR >
413 const std::string& src_max_den) {
420 template <
typename GUM_SCALAR >
422 const BayesNet< GUM_SCALAR >& src_max_den) {
429 template <
typename GUM_SCALAR >
441 template <
typename GUM_SCALAR >
446 template <
typename GUM_SCALAR >
448 GUM_SCALAR epsi_min = 1.;
449 GUM_SCALAR epsi_max = 0.;
450 GUM_SCALAR epsi_moy = 0.;
451 GUM_SCALAR epsi_den = 0.;
453 for (
auto node:
src_bn().nodes()) {
454 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(node));
456 Tensor< GUM_SCALAR >*
const tensor_min(
457 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_min_.cpt(node)));
458 Tensor< GUM_SCALAR >*
const tensor_max(
459 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_max_.cpt(node)));
462 Size entry_size = tensor->domainSize() / var_dSize;
472 std::vector< GUM_SCALAR > vertex(var_dSize);
474 for (
Size entry = 0; entry < entry_size; entry++) {
478 else den = tensor_max->get(ins_max);
482 for (
Size modality = 0; modality < var_dSize; modality++) {
483 vertex[modality] = tensor->get(ins);
486 den += vertex[modality];
488 if (vertex[modality] < 1 && vertex[modality] > 0)
490 "bnToCredal : the BayesNet contains "
491 "probabilities and not event counts "
492 "although user precised oneNet = "
496 if (vertex[modality] > 0) nbm++;
505 for (
auto modality = vertex.cbegin(), theEnd = vertex.cend(); modality != theEnd;
513 <<
"(" <<
_epsRedund_ <<
") does not sum to one for" <<
" " << entry
515 << vertex << std::endl
516 << ins << std::endl);
524 if (beta == 0) epsilon = 0;
525 else if (den == 0 || beta == 1) epsilon = GUM_SCALAR(1.0);
526 else epsilon = GUM_SCALAR(std::pow(beta, std::log1p(den)));
531 if (epsilon > epsi_max) epsi_max = epsilon;
533 if (epsilon < epsi_min) epsi_min = epsilon;
537 for (
Size modality = 0; modality < var_dSize; modality++) {
538 if ((vertex[modality] > 0 && nbm > 1) || !keepZeroes) {
539 min = GUM_SCALAR((1. - epsilon) * vertex[modality]);
541 if (oneNet) min = GUM_SCALAR(min * 1.0 / den);
543 max = GUM_SCALAR(min + epsilon);
547 min = vertex[modality];
549 if (oneNet) min = GUM_SCALAR(min * 1.0 / den);
554 tensor_min->set(ins_min, min);
555 tensor_max->set(ins_max, max);
567 _epsilonMoy_ = (GUM_SCALAR)epsi_moy / (GUM_SCALAR)epsi_den;
572 template <
typename GUM_SCALAR >
575 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(node));
577 auto var_dSize =
_src_bn_.variable(node).domainSize();
578 auto entry_size = tensor->domainSize() / var_dSize;
584 std::vector< GUM_SCALAR > vertex(var_dSize);
586 for (
Size entry = 0; entry < entry_size; entry++) {
591 for (
Size modality = 0; modality < var_dSize; modality++) {
592 vertex[modality] = tensor->get(ins);
594 if (vertex[modality] < 1 && vertex[modality] > 0)
596 "lagrangeNormalization : the BayesNet "
597 "contains probabilities and not event "
600 den += vertex[modality];
602 if (!zeroes && vertex[modality] == 0) { zeroes =
true; }
610 for (
Size modality = 0; modality < var_dSize; modality++) {
611 tensor->set(ins, tensor->get(ins) + 1);
621 template <
typename GUM_SCALAR >
624 const Tensor< GUM_SCALAR >*
const tensor(&
_src_bn_.cpt(node));
626 Tensor< GUM_SCALAR >*
const tensor_min(
627 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_min_.cpt(node)));
628 Tensor< GUM_SCALAR >*
const tensor_max(
629 const_cast< Tensor< GUM_SCALAR >*
const >(&
_src_bn_max_.cpt(node)));
632 Size entry_size = tensor->domainSize() / var_dSize;
642 std::vector< GUM_SCALAR > vertex(var_dSize);
644 for (
Size entry = 0; entry < entry_size; entry++) {
648 for (
Size modality = 0; modality < var_dSize; modality++) {
649 vertex[modality] = tensor->get(ins);
651 if (vertex[modality] < 1 && vertex[modality] > 0)
653 "idmLearning : the BayesNet contains "
654 "probabilities and not event counts.");
656 den += vertex[modality];
658 if (vertex[modality] > 0) nbm++;
663 if (nbm > 1 || !keepZeroes) den += s;
667 for (
Size modality = 0; modality < var_dSize; modality++) {
668 min = vertex[modality];
671 if ((vertex[modality] > 0 && nbm > 1) || !keepZeroes) { max += s; }
673 min = GUM_SCALAR(min * 1.0 / den);
674 max = GUM_SCALAR(max * 1.0 / den);
676 tensor_min->set(ins_min, min);
677 tensor_max->set(ins_max, max);
694 template <
typename GUM_SCALAR >
701 const Tensor< GUM_SCALAR >*
const tensor_min(&
_src_bn_min_.cpt(node));
702 const Tensor< GUM_SCALAR >*
const tensor_max(&
_src_bn_max_.cpt(node));
705 Size entry_size = tensor_min->domainSize() / var_dSize;
707 std::vector< std::vector< std::vector< GUM_SCALAR > > > var_cpt(entry_size);
715 std::vector< GUM_SCALAR > lower(var_dSize);
716 std::vector< GUM_SCALAR > upper(var_dSize);
718 for (
Size entry = 0; entry < entry_size; entry++) {
719 for (
Size modality = 0; modality < var_dSize; modality++, ++ins_min, ++ins_max) {
720 lower[modality] = tensor_min->get(ins_min);
721 upper[modality] = tensor_max->get(ins_max);
724 bool all_equals =
true;
725 std::vector< std::vector< GUM_SCALAR > > vertices;
727 for (
Size modality = 0; modality < var_dSize; modality++) {
728 if (std::fabs(upper[modality] - lower[modality]) < 1e-6)
continue;
731 std::vector< GUM_SCALAR > vertex(var_dSize);
732 vertex[modality] = upper[modality];
734 for (
Size mod = 0; mod < var_dSize; mod++) {
735 if (modality != mod) vertex[mod] = lower[mod];
738 GUM_SCALAR total = 0;
740 auto vsize = vertex.size();
742 for (
Size i = 0; i < vsize; i++)
745 if (std::fabs(total - 1.) > 1e-6)
748 <<
" does not sum to one for " << entry << std::endl
749 << vertex << std::endl);
751 vertices.push_back(vertex);
755 std::vector< GUM_SCALAR > vertex(var_dSize);
757 for (
Size modality = 0; modality < var_dSize; modality++)
758 vertex[modality] = lower[modality];
760 GUM_SCALAR total = 0.;
762 auto vsize = vertex.size();
764 for (
Size i = 0; i < vsize; i++)
767 if (std::fabs(total - 1.) > 1e-6)
770 <<
" does not sum to one for " << entry << std::endl
771 << vertex << std::endl);
773 vertices.push_back(vertex);
776 var_cpt[entry] = vertices;
789 template <
typename GUM_SCALAR >
798 const Tensor< GUM_SCALAR >*
const tensor_min(&
_src_bn_min_.cpt(node));
799 const Tensor< GUM_SCALAR >*
const tensor_max(&
_src_bn_max_.cpt(node));
802 Size entry_size = tensor_min->domainSize() / var_dSize;
804 std::vector< std::vector< std::vector< GUM_SCALAR > > > var_cpt(entry_size);
812 lrsWrapper.
setUpH(var_dSize);
814 for (
Size entry = 0; entry < entry_size; entry++) {
815 for (
Size modality = 0; modality < var_dSize; modality++) {
816 if (tensor_min->get(ins_min) > tensor_max->get(ins_max)) {
819 <<
_src_bn_.variable(node).name() <<
" (at " << ins_min
820 <<
"), the min is greater than the max : " << tensor_min->get(ins_min)
821 <<
">" << tensor_max->get(ins_max) <<
".");
823 lrsWrapper.
fillH(tensor_min->get(ins_min), tensor_max->get(ins_max), modality);
843 template <
typename GUM_SCALAR >
850 const Tensor< GUM_SCALAR >*
const tensor_min(&
_src_bn_min_.cpt(node));
851 const Tensor< GUM_SCALAR >*
const tensor_max(&
_src_bn_max_.cpt(node));
853 auto var_dSize =
_src_bn_.variable(node).domainSize();
854 auto entry_size = tensor_min->domainSize() / var_dSize;
856 std::vector< std::vector< std::vector< GUM_SCALAR > > > var_cpt(entry_size);
865 for (
Size entry = 0; entry < entry_size; entry++) {
866 std::vector< std::vector< GUM_SCALAR > > vertices;
867 std::vector< GUM_SCALAR > vertex(var_dSize);
869 std::vector< std::vector< GUM_SCALAR > > inequalities(
871 std::vector< GUM_SCALAR >(var_dSize + 1, 0));
873 std::vector< GUM_SCALAR > sum_ineq1(var_dSize + 1, -1);
874 std::vector< GUM_SCALAR > sum_ineq2(var_dSize + 1, 1);
878 bool isInterval =
false;
880 for (
Size modality = 0; modality < var_dSize; modality++) {
881 inequalities[modality * 2][0] = -tensor_min->get(ins_min);
882 inequalities[modality * 2 + 1][0] = tensor_max->get(ins_max);
883 inequalities[modality * 2][modality + 1] = 1;
884 inequalities[modality * 2 + 1][modality + 1] = -1;
886 vertex[modality] = inequalities[modality * 2 + 1][0];
889 && (-inequalities[modality * 2][0] != inequalities[modality * 2 + 1][0]))
896 inequalities.push_back(sum_ineq1);
897 inequalities.push_back(sum_ineq2);
900 vertices.push_back(vertex);
905 }
catch (
const std::exception& err) {
906 std::cout << err.what() << std::endl;
912 if (entry == 0 && vertices.size() >= 2) {
913 auto tmp = vertices[0];
914 vertices[0] = vertices[1];
918 var_cpt[entry] = vertices;
937 template <
typename GUM_SCALAR >
939 const std::string& max_path)
const {
942 std::string minfilename = min_path;
943 std::string maxfilename = max_path;
944 std::ofstream min_file(minfilename.c_str(), std::ios::out | std::ios::trunc);
945 std::ofstream max_file(maxfilename.c_str(), std::ios::out | std::ios::trunc);
947 if (!min_file.good())
948 GUM_ERROR(
IOError,
"bnToCredal() : could not open stream : min_file : " << minfilename);
950 if (!max_file.good()) {
952 GUM_ERROR(
IOError,
"bnToCredal() : could not open stream : min_file : " << maxfilename);
969 template <
typename GUM_SCALAR >
972 auto bin_bn =
new BayesNet< GUM_SCALAR >();
976 auto credalNet_bin_cpt
984 credalNet_current_cpt;
995 bin_bn->beginTopologyTransformation();
998 auto var_dSize =
current_bn->variable(node).domainSize();
1000 if (var_dSize != 2) {
1003 superiorPow(
static_cast< unsigned long >(var_dSize), b, c);
1006 std::string bit_name;
1007 std::vector< NodeId > bits(nb_bits);
1009 for (
Size bit = 0; bit < nb_bits; bit++) {
1010 bit_name =
current_bn->variable(node).name() +
"-b";
1011 std::stringstream ss;
1013 bit_name += ss.str();
1016 NodeId iD = bin_bn->add(var_bit);
1025 const std::string bit_name =
current_bn->variable(node).name();
1027 const NodeId iD = bin_bn->add(var_bit);
1029 _var_bits_.insert(node, std::vector< NodeId >(1, iD));
1038 parent_bit < spbits;
1048 const auto bitsize =
_var_bits_[node].size();
1050 for (
Size bit_c = 1; bit_c < bitsize; bit_c++)
1051 for (
Size bit_p = 0; bit_p < bit_c; bit_p++)
1056 bin_bn->endTopologyTransformation();
1062 for (
Size var = 0; var < varsize; var++) {
1065 for (
Size i = 0; i < bitsize; i++) {
1066 Tensor< GUM_SCALAR >
const* tensor(&bin_bn->cpt(
_var_bits_[var][i]));
1070 auto entry_size = tensor->domainSize() / 2;
1071 std::vector< std::vector< std::vector< GUM_SCALAR > > > var_cpt(entry_size);
1075 for (
Size conf = 0; conf < entry_size; conf++) {
1076 std::vector< std::vector< GUM_SCALAR > > pvar_cpt;
1077 auto verticessize = (*credalNet_current_cpt)[var][old_conf].size();
1079 for (
Size old_distri = 0; old_distri < verticessize; old_distri++) {
1080 const std::vector< GUM_SCALAR >& vertex
1081 = (*credalNet_current_cpt)[var][old_conf][old_distri];
1082 auto vertexsize = vertex.size();
1084 std::vector< Idx > incc(vertexsize, 0);
1086 for (
Size preced = 0; preced < i; preced++) {
1087 auto bit_pos = ins.
pos(bin_bn->variable(
_var_bits_[var][preced]));
1088 auto val = ins.
val(bit_pos);
1093 if (val == 0) elem = 0;
1096 while (elem < vertexsize) {
1100 if (elem % pas == 0) elem += pas;
1106 std::vector< GUM_SCALAR > distri(2, 0);
1109 for (
Size elem = 0; elem < vertexsize; elem++) {
1110 if (elem % pas == 0) pos = -pos;
1112 if (incc[elem] == i)
1113 (pos < 0) ? (distri[0] += vertex[elem]) : (distri[1] += vertex[elem]);
1117 GUM_SCALAR den = distri[0] + distri[1];
1128 pvar_cpt.push_back(distri);
1133 std::vector< std::vector< GUM_SCALAR > > vertices(2, std::vector< GUM_SCALAR >(2, 1));
1136 const auto new_verticessize = pvar_cpt.size();
1138 for (
Size v = 0; v < new_verticessize; v++) {
1139 if (pvar_cpt[v][1] < vertices[0][1]) vertices[0][1] = pvar_cpt[v][1];
1141 if (pvar_cpt[v][1] > vertices[1][1]) vertices[1][1] = pvar_cpt[v][1];
1144 vertices[0][0] = 1 - vertices[0][1];
1145 vertices[1][0] = 1 - vertices[1][1];
1147 pvar_cpt = vertices;
1149 var_cpt[conf] = pvar_cpt;
1156 if (old_conf == (*credalNet_current_cpt)[var].size()) old_conf = 0;
1160 credalNet_bin_cpt->insert(
_var_bits_[var][i], var_cpt);
1166 bin_bn->beginTopologyTransformation();
1171 for (
Size i = 0; i < old_varsize; i++) {
1175 if (bitsize == 1)
continue;
1177 auto old_card =
_src_bn_.variable(i).domainSize();
1179 for (
Size mod = 0; mod < old_card; mod++) {
1180 std::stringstream ss;
1186 const NodeId indic = bin_bn->add(var);
1189 for (
Size bit = 0; bit < bitsize; bit++)
1195 std::vector< std::vector< std::vector< GUM_SCALAR > > > icpt(num);
1197 for (
Size entry = 0; entry < num; entry++) {
1198 std::vector< std::vector< GUM_SCALAR > > vertices(1, std::vector< GUM_SCALAR >(2, 0));
1200 if (mod == entry) vertices[0][1] = 1;
1201 else vertices[0][0] = 1;
1203 icpt[entry] = vertices;
1206 credalNet_bin_cpt->insert(indic, icpt);
1212 bin_bn->endTopologyTransformation();
1232 template <
typename GUM_SCALAR >
1240 template <
typename GUM_SCALAR >
1246 template <
typename GUM_SCALAR >
1254 template <
typename GUM_SCALAR >
1260 template <
typename GUM_SCALAR >
1265 template <
typename GUM_SCALAR >
1271 template <
typename GUM_SCALAR >
1278 std::vector< GUM_SCALAR > min(pConf);
1279 std::vector< GUM_SCALAR > max(pConf);
1281 for (
Size pconf = 0; pconf < pConf; pconf++) {
1289 GUM_SCALAR delta = v1 - v2;
1290 min[pconf] = (delta >= 0) ? v2 : v1;
1291 max[pconf] = (delta >= 0) ? v1 : v2;
1301 template <
typename GUM_SCALAR >
1302 const std::vector< std::vector< GUM_SCALAR > >&
1307 template <
typename GUM_SCALAR >
1308 const std::vector< std::vector< GUM_SCALAR > >&
1313 template <
typename GUM_SCALAR >
1318 template <
typename GUM_SCALAR >
1323 template <
typename GUM_SCALAR >
1328 template <
typename GUM_SCALAR >
1330 std::stringstream output;
1338 if (this->_credalNet_current_cpt_ ==
nullptr)
1343 const Tensor< GUM_SCALAR >* tensor(&
_current_bn_->cpt(node));
1344 auto pconfs = tensor->domainSize() /
_current_bn_->variable(node).domainSize();
1346 output <<
"\n" <<
_current_bn_->variable(node) <<
"\n";
1353 for (
Size pconf = 0; pconf < pconfs; pconf++) {
1354 output << ins <<
" : ";
1355 output << (*_credalNet_current_cpt_)[node][pconf] <<
"\n";
1357 if (pconf < pconfs - 1) ++ins;
1363 return output.str();
1366 template <
typename GUM_SCALAR >
1373 template <
typename GUM_SCALAR >
1382 template <
typename GUM_SCALAR >
1391 _epsF_ = GUM_SCALAR(1e-6);
1409 template <
typename GUM_SCALAR >
1411 const std::string& src_max_den) {
1415 if (src_max_den.compare(
"") != 0) other = src_max_den;
1416 else other = src_min_num;
1426 template <
typename GUM_SCALAR >
1428 const BayesNet< GUM_SCALAR >& src_max_den) {
1432 if (src_max_den.size() > 0)
_src_bn_max_ = src_max_den;
1436 template <
typename GUM_SCALAR >
1438 const std::vector< std::vector< std::vector< GUM_SCALAR > > >& var_cpt)
const {
1439 Size vertices_size = 0;
1441 for (
auto entry = var_cpt.cbegin(), theEnd = var_cpt.cend(); entry != theEnd; ++entry) {
1442 if (entry->size() > vertices_size) vertices_size =
Size(entry->size());
1445 return int(vertices_size);
1448 template <
typename GUM_SCALAR >
1458 dest.beginTopologyTransformation();
1461 for (
auto parent_idIt:
_current_bn_->cpt(node).variablesSequence()) {
1467 dest.endTopologyTransformation();
1522 template <
typename GUM_SCALAR >
1524 std::vector< std::vector< GUM_SCALAR > >& v_rep)
const {
1534 std::ofstream h_file(sinefile.c_str(), std::ios::out | std::ios::trunc);
1537 GUM_ERROR(
IOError,
" __H2Vlrs : could not open lrs input file : " << sinefile)
1539 h_file <<
"H - representation\n";
1540 h_file <<
"begin\n";
1541 h_file << h_rep.size() <<
' ' << h_rep[0].size() <<
" rational\n";
1543 for (
auto it = h_rep.cbegin(), theEnd = h_rep.cend(); it != theEnd; ++it) {
1544 for (
auto it2 = it->cbegin(), theEnd2 = it->cend(); it2 != theEnd2; ++it2) {
1550 ((*it2 > 0) ? *it2 : -*it2),
1554 h_file << ((*it2 > 0) ? num : -num) <<
'/' << den <<
' ';
1567 std::string soft_name =
"lrs";
1568 std::string extfile(sinefile);
1571 args[0] =
new char[soft_name.size()];
1572 args[1] =
new char[sinefile.size()];
1573 args[2] =
new char[extfile.size()];
1575 strcpy(args[0], soft_name.c_str());
1576 strcpy(args[1], sinefile.c_str());
1577 strcpy(args[2], extfile.c_str());
1587 std::ifstream v_file(extfile.c_str() , std::ios::in);
1589 if (!v_file.good())
GUM_ERROR(
IOError,
" __H2Vlrs : could not open lrs ouput file : ")
1591 std::string line, tmp;
1593 GUM_SCALAR probability;
1595 std::string::size_type pos;
1596 bool keep_going =
true;
1599 std::vector< GUM_SCALAR > vertex;
1601 v_file.ignore(256,
'l');
1603 while (v_file.good() && keep_going) {
1604 getline(v_file, line);
1606 if (line.size() == 0)
continue;
1607 else if (line.compare(
"end") == 0) {
1617 }
else if (line[1] !=
'1') {
1619 " __H2Vlrs : reading something other than a vertex from "
1620 "lrs output file : ");
1623 line = line.substr(2);
1624 cstr =
new char[line.size() + 1];
1625 strcpy(cstr, line.c_str());
1627 p = strtok(cstr,
" ");
1629 while (p !=
nullptr) {
1632 if (tmp.compare(
"1") == 0 || tmp.compare(
"0") == 0)
1633 probability = GUM_SCALAR(atof(tmp.c_str()));
1635 pos = tmp.find(
"/");
1636 probability = GUM_SCALAR(atof(tmp.substr(0, pos).c_str())
1637 / atof(tmp.substr(pos + 1, tmp.size()).c_str()));
1640 vertex.push_back(probability);
1641 p = strtok(
nullptr,
" ");
1653 const auto nsize = v_rep.size();
1654 const auto real_nb_threads = std::min(nb_threads, nsize);
1658 std::vector< Size > t_redund(real_nb_threads);
1661 auto threadedExec = [
this, ranges, &t_redund, vertex, v_rep](
const std::size_t this_thread,
1662 const std::size_t nb_threads) {
1663 const auto vsize = vertex.size();
1664 auto& thread_redund = t_redund[this_thread];
1666 for (
Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
1668 for (
Idx modality = 0; modality < vsize; ++modality) {
1669 if (std::fabs(vertex[modality] - v_rep[i][modality]) >
_epsRedund_) {
1675 if (thread_redund)
return;
1683 bool is_redund =
false;
1684 for (
const auto thread_redund: t_redund) {
1685 if (thread_redund) {
1727 if (!is_redund) v_rep.push_back(vertex);
1735 if (std::remove(sinefile.c_str()) != 0)
GUM_ERROR(
IOError,
"error removing : " + sinefile)
1737 if (std::remove(extfile.c_str()) != 0)
GUM_ERROR(
IOError,
"error removing : " + extfile)
1740 template <
typename GUM_SCALAR >
1764 bool precise =
true, vacuous =
true;
1770 auto vertices = entry->size();
1771 auto var_dSize = (*entry)[0].size();
1773 if (precise && vertices > 1) precise =
false;
1775 if (vacuous && vertices == var_dSize) {
1776 std::vector< bool > elem(var_dSize,
false);
1778 for (
auto vertex = entry->cbegin(), vEnd = entry->cend(); vertex != vEnd; ++vertex) {
1779 for (
auto probability = vertex->cbegin(), pEnd = vertex->cend(); probability != pEnd;
1781 if (*probability == 1) {
1782 elem[probability - vertex->begin()] =
true;
1790 for (
auto probability = elem.cbegin();
1791 probability != elem.cend();
1793 if (*probability ==
false) vacuous =
false;
1799 if (vacuous ==
false && precise ==
false) {
Definition of templatized reader of BIF files for Bayesian networks.
Writes a IBayesNet in the BIF format.
void write(std::ostream &output, const IBayesNet< GUM_SCALAR > &bn)
Writes a Bayesian network in the output stream.
Exception base for CPT error.
virtual Size domainSize() const =0
Exception : a similar element already exists.
Base class for all aGrUM's exceptions.
value_type & insert(const Key &key, const Val &val)
Adds a new element (actually a copy of this element) into the hash table.
Exception : input/output problem.
Class for assigning/browsing values to tuples of discrete variables.
const Sequence< const DiscreteVariable * > & variablesSequence() const final
Returns the sequence of DiscreteVariable of this instantiation.
Idx pos(const DiscreteVariable &v) const final
Returns the position of the variable v.
void erase(const DiscreteVariable &v) final
Removes a variable from the Instantiation.
void reorder(const Sequence< const DiscreteVariable * > &v)
Reorder vars of this instantiation giving the order in v.
Idx val(Idx i) const
Returns the current value of the variable at position i.
void setFirst()
Assign the first values to the tuple of the Instantiation.
const DiscreteVariable & variable(Idx i) const final
Returns the variable at position i in the tuple.
Idx nbrDim() const final
Returns the number of variables in the Instantiation.
bool forgetMaster()
Deassociate the master MultiDimAdressable, if any.
Exception : operation not allowed.
static void farey(int64_t &numerator, int64_t &denominator, const GUM_SCALAR &number, const int64_t &den_max=1000000L, const GUM_SCALAR &zero=1e-6)
Find the rational close enough to a given (decimal) number in [-1,1] and whose denominator is not higher than den_max.
Exception : problem with size.
void _H2Vlrs_(const std::vector< std::vector< GUM_SCALAR > > &h_rep, std::vector< std::vector< GUM_SCALAR > > &v_rep) const
void _initParams_()
Initialize private constant variables after the Constructor has been called.
GUM_SCALAR _epsilonMoy_
The average perturbation of the BayesNet provided as input for this CredalNet.
void setCPTs(const NodeId &id, const std::vector< std::vector< std::vector< GUM_SCALAR > > > &cpt)
Set the vertices of the credal sets ( all of the conditionals ) of a given node
GUM_SCALAR _deltaC_
5 by default, used by fracC as number of decimals.
BayesNet< GUM_SCALAR > _src_bn_max_
BayesNet used to store upper probabilities.
void intervalToCredalWithFiles()
NodeId addVariable(const std::string &name, const Size &card)
Adds a discrete node into the network.
BayesNet< GUM_SCALAR > * _current_bn_
Up-to-date BayesNet (used as a DAG).
void _intervalToCredal_()
Computes the vertices of each credal set according to their interval definition (does not use lrs).
void saveBNsMinMax(const std::string &min_path, const std::string &max_path) const
If this CredalNet was built over a perturbed BayesNet, one can save the intervals as two BayesNet.
std::string toString() const
bool _hasComputedBinaryCPTMinMax_
Used by L2U, to know if lower and upper probabilities over the second modality has been stored in ord...
Size domainSize(const NodeId &id)
Get the cardinality of a node
NodeProperty< std::vector< std::vector< std::vector< GUM_SCALAR > > > > _credalNet_src_cpt_
This CredalNet original CPTs.
const GUM_SCALAR & epsilonMax() const
void _bnCopy_(BayesNet< GUM_SCALAR > &bn_dest)
bool hasComputedBinaryCPTMinMax() const
GUM_SCALAR _precision_
Precision used by frac.
const NodeProperty< std::vector< std::vector< std::vector< GUM_SCALAR > > > > & credalNet_currentCpt() const
Instantiation instantiation(const NodeId &id)
Get an Instantiation from a node id, useful to fill the constraints of the network
void bnToCredal(GUM_SCALAR beta, bool oneNet, bool keepZeroes)
Perturbs the BayesNet provided as input for this CredalNet by generating intervals instead of point probabilities.
void intervalToCredal()
Computes the vertices of each credal set according to their interval definition (uses lrs).
const GUM_SCALAR & epsilonMean() const
std::vector< std::vector< GUM_SCALAR > > _binCptMin_
Used with binary networks to speed-up L2U inference.
void approximatedBinarization()
Approximate binarization.
const BayesNet< GUM_SCALAR > & src_bn() const
int _find_dNode_card_(const std::vector< std::vector< std::vector< GUM_SCALAR > > > &var_cpt) const
BayesNet< GUM_SCALAR > _src_bn_
Original BayesNet (used as a DAG).
void computeBinaryCPTMinMax()
Used with binary networks to speed-up L2U inference.
const NodeProperty< std::vector< std::vector< std::vector< GUM_SCALAR > > > > & credalNet_srcCpt() const
NodeType currentNodeType(const NodeId &id) const
NodeProperty< NodeType > _original_nodeType_
The NodeType of each node from the ORIGINAL network.
void lagrangeNormalization()
Normalize counts of a BayesNet storing counts of each event such that no probability is 0.
NodeProperty< std::vector< std::vector< std::vector< GUM_SCALAR > > > > * _credalNet_current_cpt_
This CredalNet up-to-date CPTs.
bool isSeparatelySpecified() const
void _sort_varType_()
Set the NodeType of each node
void _initCNNets_(const std::string &src_min_num, const std::string &src_max_den)
Initialize private BayesNet variables after the Constructor has been called.
void idmLearning(const Idx s=0, const bool keepZeroes=false)
Learns parameters from a BayesNet storing counts of events.
GUM_SCALAR _epsilonMax_
The highest perturbation of the BayesNet provided as input for this CredalNet.
std::vector< std::vector< GUM_SCALAR > > _binCptMax_
Used with binary networks to speed-up L2U inference.
NodeType nodeType(const NodeId &id) const
void setCPT(const NodeId &id, Size &entry, const std::vector< std::vector< GUM_SCALAR > > &cpt)
Set the vertices of one credal set of a given node ( any instantiation index )
bool _separatelySpecified_
TRUE if this CredalNet is separately and interval specified, FALSE otherwise.
NodeProperty< std::vector< NodeId > > _var_bits_
Corresponding bits of each variable.
const std::vector< std::vector< GUM_SCALAR > > & get_binaryCPT_max() const
Used with binary networks to speed-up L2U inference.
BayesNet< GUM_SCALAR > _src_bn_min_
BayesNet used to store lower probabilities.
const BayesNet< GUM_SCALAR > & current_bn() const
void addArc(const NodeId &tail, const NodeId &head)
Adds an arc between two nodes.
const std::vector< std::vector< GUM_SCALAR > > & get_binaryCPT_min() const
Used with binary networks to speed-up L2U inference.
void fillConstraints(const NodeId &id, const std::vector< GUM_SCALAR > &lower, const std::vector< GUM_SCALAR > &upper)
Set the interval constraints of the credal sets of a given node (all instantiations )
GUM_SCALAR _epsilonMin_
The lowest perturbation of the BayesNet provided as input for this CredalNet.
const GUM_SCALAR & epsilonMin() const
NodeProperty< NodeType > * _current_nodeType_
The NodeType of each node from the up-to-date network.
GUM_SCALAR _denMax_
Highest possible denominator allowed when using farey.
void fillConstraint(const NodeId &id, const Idx &entry, const std::vector< GUM_SCALAR > &lower, const std::vector< GUM_SCALAR > &upper)
Set the interval constraints of a credal set of a given node ( from an instantiation index )
GUM_SCALAR _epsRedund_
Value under which a decimal number is considered to be zero when computing redundant vertices.
GUM_SCALAR _precisionC_
1e6 by default, used by fracC as precision.
NodeType
NodeType to speed-up computations in some algorithms.
CredalNet()
Constructor used to create a CredalNet step by step, i.e.
GUM_SCALAR _epsF_
Value under which a decimal number is considered to be zero when using farey.
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
const matrix & getOutput() const
Get the output matrix solution of the problem.
void H2V()
H-representation to V-representation.
void fillH(const GUM_SCALAR &min, const GUM_SCALAR &max, const Size &modal)
Creates the H-representation of min <= p(X=modal | .) <= max and add it to the problem input _input_.
void setUpH(const Size &card)
Sets up an H-representation.
void nextHInput()
Reset the wrapper for next computation for a H-representation with the same variable cardinality and ...
Class representing Credal Networks.
#define GUM_ERROR(type, msg)
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Size Idx
Type for indexes.
Size NodeId
Type for node ids.
HashTable< NodeId, VAL > NodeProperty
Property on graph elements.
Set< NodeId > NodeSet
Some typedefs and defines for shortcuts ...
unsigned long int2Pow(unsigned long exponent)
Specialized base 2 pow function with integer.
void superiorPow(unsigned long card, unsigned long &num_bits, unsigned long &new_card)
Compute the superior and closest power of two of an integer.
std::string getUniqueFileName()
Returns a path to a unique file name.
namespace for all credal networks entities
gum is the global namespace for all aGrUM entities
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
unsigned int getNumberOfThreads()
returns the max number of threads used by default when entering the next parallel region
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running
Utilities for manipulating strings.