aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
multipleInferenceEngine_tpl.h
Go to the documentation of this file.
1/****************************************************************************
2 * This file is part of the aGrUM/pyAgrum library. *
3 * *
4 * Copyright (c) 2005-2025 by *
5 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
6 * - Christophe GONZALES(_at_AMU) *
7 * *
8 * The aGrUM/pyAgrum library is free software; you can redistribute it *
9 * and/or modify it under the terms of either : *
10 * *
11 * - the GNU Lesser General Public License as published by *
12 * the Free Software Foundation, either version 3 of the License, *
13 * or (at your option) any later version, *
14 * - the MIT license (MIT), *
15 * - or both in dual license, as here. *
16 * *
17 * (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) *
18 * *
19 * This aGrUM/pyAgrum library is distributed in the hope that it will be *
20 * useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
21 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS *
22 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR *
26 * OTHER DEALINGS IN THE SOFTWARE. *
27 * *
28 * See LICENCES for more details. *
29 * *
30 * SPDX-FileCopyrightText: Copyright 2005-2025 *
31 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
32 * - Christophe GONZALES(_at_AMU) *
33 * SPDX-License-Identifier: LGPL-3.0-or-later OR MIT *
34 * *
35 * Contact : info_at_agrum_dot_org *
36 * homepage : http://agrum.gitlab.io *
37 * gitlab : https://gitlab.com/agrumery/agrum *
38 * *
39 ****************************************************************************/
40#pragma once
41
42
45
46namespace gum {
47 namespace credal {
48
49 template < typename GUM_SCALAR, class BNInferenceEngine >
55
56 template < typename GUM_SCALAR, class BNInferenceEngine >
60
61 template < typename GUM_SCALAR, class BNInferenceEngine >
63 const Size& num_threads,
64 const bool _storeVertices_,
65 const bool _storeBNOpt_) {
66 workingSet_.clear();
67 workingSet_.resize(num_threads, nullptr);
68 workingSetE_.clear();
69 workingSetE_.resize(num_threads, nullptr);
70
71 l_marginalMin_.clear();
72 l_marginalMin_.resize(num_threads);
73 l_marginalMax_.clear();
74 l_marginalMax_.resize(num_threads);
75 l_expectationMin_.clear();
76 l_expectationMin_.resize(num_threads);
77 l_expectationMax_.clear();
78 l_expectationMax_.resize(num_threads);
79
80 l_clusters_.clear();
81 l_clusters_.resize(num_threads);
82
83 if (_storeVertices_) {
84 l_marginalSets_.clear();
85 l_marginalSets_.resize(num_threads);
86 }
87
88 if (_storeBNOpt_) {
89 for (Size ptr = 0; ptr < this->l_optimalNet_.size(); ptr++)
90 if (this->l_optimalNet_[ptr] != nullptr) delete l_optimalNet_[ptr];
91
92 l_optimalNet_.clear();
93 l_optimalNet_.resize(num_threads);
94 }
95
96 l_modal_.clear();
97 l_modal_.resize(num_threads);
98
100 this->oldMarginalMin_ = this->marginalMin_;
101 this->oldMarginalMax_.clear();
102 this->oldMarginalMax_ = this->marginalMax_;
103
104 // init the random number generators
105 generators_.clear();
106 generators_.resize(num_threads);
107 auto seed = currentRandomGeneratorValue();
108 for (auto& generator: generators_) {
109 generator.seed(seed);
110 seed = generator();
111 }
112 }
113
114 template < typename GUM_SCALAR, class BNInferenceEngine >
116 Size tId,
117 const NodeId& id,
118 const std::vector< GUM_SCALAR >& vertex,
119 const bool& elimRedund) {
120 // save E(X) if we don't save vertices
121 if (!_infE_::storeVertices_ && !l_modal_[tId].empty()) {
122 std::string var_name = workingSet_[tId]->variable(id).name();
123 auto delim = var_name.find_first_of("_");
124 var_name = var_name.substr(0, delim);
125
126 if (l_modal_[tId].exists(var_name)) {
127 GUM_SCALAR exp = 0;
128 Size vsize = Size(vertex.size());
129
130 for (Size mod = 0; mod < vsize; mod++)
131 exp += vertex[mod] * l_modal_[tId][var_name][mod];
132
133 if (exp > l_expectationMax_[tId][id]) l_expectationMax_[tId][id] = exp;
134
135 if (exp < l_expectationMin_[tId][id]) l_expectationMin_[tId][id] = exp;
136 }
137 } // end of : if modal (map) not empty
138
139 bool newOne = false;
140 bool added = false;
141 bool result = false;
142 // for burn in, we need to keep checking on local marginals and not global
143 // ones
144 // (faster inference)
145 // we also don't want to store dbn for observed variables since there will
146 // be a
147 // huge number of them (probably all of them).
148 Size vsize = Size(vertex.size());
149
150 for (Size mod = 0; mod < vsize; mod++) {
151 if (vertex[mod] < l_marginalMin_[tId][id][mod]) {
152 l_marginalMin_[tId][id][mod] = vertex[mod];
153 newOne = true;
154
155 if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
156 std::vector< Size > key(3);
157 key[0] = id;
158 key[1] = mod;
159 key[2] = 0;
160
161 if (l_optimalNet_[tId]->insert(key, true)) result = true;
163 }
164
165 if (vertex[mod] > l_marginalMax_[tId][id][mod]) {
166 l_marginalMax_[tId][id][mod] = vertex[mod];
167 newOne = true;
168
169 if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(id)) {
170 std::vector< Size > key(3);
171 key[0] = id;
172 key[1] = mod;
173 key[2] = 1;
174
175 if (l_optimalNet_[tId]->insert(key, true)) result = true;
176 }
177 } else if (vertex[mod] == l_marginalMin_[tId][id][mod]
178 || vertex[mod] == l_marginalMax_[tId][id][mod]) {
179 newOne = true;
180
181 if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMin_[tId][id][mod]
182 && !_infE_::evidence_.exists(id)) {
183 std::vector< Size > key(3);
184 key[0] = id;
185 key[1] = mod;
186 key[2] = 0;
187
188 if (l_optimalNet_[tId]->insert(key, false)) result = true;
189 }
190
191 if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMax_[tId][id][mod]
192 && !_infE_::evidence_.exists(id)) {
193 std::vector< Size > key(3);
194 key[0] = id;
195 key[1] = mod;
196 key[2] = 1;
197
198 if (l_optimalNet_[tId]->insert(key, false)) result = true;
199 }
200 }
201
202 // store point to compute credal set vertices.
203 // check for redundancy at each step or at the end ?
204 if (_infE_::storeVertices_ && !added && newOne) {
205 _updateThreadCredalSets_(tId, id, vertex, elimRedund);
206 added = true;
208 }
209
210 // if all variables didn't get better marginals, we will delete
211 if (_infE_::storeBNOpt_ && result) return true;
212
213 return false;
214 }
216 template < typename GUM_SCALAR, class BNInferenceEngine >
218 Size tId,
219 const NodeId& id,
220 const std::vector< GUM_SCALAR >& vertex,
221 const bool& elimRedund) {
222 auto& nodeCredalSet = l_marginalSets_[tId][id];
223 Size dsize = Size(vertex.size());
224
225 bool eq = true;
226
227 for (auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend(); it != itEnd; ++it) {
228 eq = true;
229
230 for (Size i = 0; i < dsize; i++) {
231 if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
232 eq = false;
233 break;
235 }
236
237 if (eq) break;
238 }
239
240 if (!eq || nodeCredalSet.size() == 0) {
241 nodeCredalSet.push_back(vertex);
242 return;
243 } else return;
244
247 if (nodeCredalSet.size() == 1) return;
248
249 // check that the point and all previously added ones are not inside the
250 // actual
251 // polytope
252 auto itEnd = std::remove_if(
253 nodeCredalSet.begin(),
254 nodeCredalSet.end(),
255 [&](const std::vector< GUM_SCALAR >& v) -> bool {
256 for (auto jt = v.cbegin(),
257 jtEnd = v.cend(),
258 minIt = l_marginalMin_[tId][id].cbegin(),
259 minItEnd = l_marginalMin_[tId][id].cend(),
260 maxIt = l_marginalMax_[tId][id].cbegin(),
261 maxItEnd = l_marginalMax_[tId][id].cend();
262 jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
263 ++jt, ++minIt, ++maxIt) {
264 if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
265 && std::fabs(*minIt - *maxIt) > 1e-6)
266 return false;
267 }
268 return true;
269 });
270
271 nodeCredalSet.erase(itEnd, nodeCredalSet.end());
272
273 // we need at least 2 points to make a convex combination
274 if (!elimRedund || nodeCredalSet.size() <= 2) return;
275
276 // there may be points not inside the polytope but on one of it's facet,
277 // meaning it's still a convex combination of vertices of this facet. Here
278 // we need lrs.
279 Size setSize = Size(nodeCredalSet.size());
280
281 LRSWrapper< GUM_SCALAR > lrsWrapper;
282 lrsWrapper.setUpV(dsize, setSize);
283
284 for (const auto& vtx: nodeCredalSet)
285 lrsWrapper.fillV(vtx);
286
287 lrsWrapper.elimRedundVrep();
288
289 l_marginalSets_[tId][id] = lrsWrapper.getOutput();
290 }
291
292 template < typename GUM_SCALAR, class BNInferenceEngine >
294 // compute the max number of threads to use (avoid nested threads)
295 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
296 ? this->threadRanges_.size() - 1
297 : 1; // no nested multithreading
298
299 // create the function to be executed by the threads
300 auto threadedExec = [this](const std::size_t this_thread,
301 const std::size_t nb_threads,
302 const std::vector< std::pair< Idx, Idx > >& ranges) {
303 auto i = this->threadRanges_[this_thread].first;
304 auto j = this->threadRanges_[this_thread].second;
305 auto domain_size = this->marginalMax_[i].size();
306 const auto end_i = this->threadRanges_[this_thread + 1].first;
307 auto end_j = this->threadRanges_[this_thread + 1].second;
308 const auto marginalMax_size = this->marginalMax_.size();
309 const auto tsize = Size(l_marginalMin_.size());
310
311 while ((i < end_i) || (j < end_j)) {
312 // go through all work indices
313 for (Idx tId = 0; tId < tsize; tId++) {
314 if (l_marginalMin_[tId][i][j] < this->marginalMin_[i][j])
315 this->marginalMin_[i][j] = l_marginalMin_[tId][i][j];
316
317 if (l_marginalMax_[tId][i][j] > this->marginalMax_[i][j])
318 this->marginalMax_[i][j] = l_marginalMax_[tId][i][j];
319 }
320
321 if (++j == domain_size) {
322 j = 0;
323 ++i;
324 if (i < marginalMax_size) domain_size = this->marginalMax_[i].size();
325 }
326 }
327 };
328
329 // launch the threads
331 nb_threads,
332 threadedExec,
333 (nb_threads == 1)
334 ? std::vector< std::pair< NodeId, Idx > >{{0, 0}, {this->marginalMin_.size(), 0}}
335 : this->threadRanges_);
336 }
337
338 /*
339 // old openMP code :
340 #pragma omp parallel
341 {
342 int threadId = threadsOMP::getThreadNumber();
343 long nsize = long(workingSet_[threadId]->size());
344
345 #pragma omp for
346 for (long i = 0; i < nsize; i++) {
347 Size dSize = Size(l_marginalMin_[threadId][i].size());
348
349 for (Size j = 0; j < dSize; j++) {
350 Size tsize = Size(l_marginalMin_.size());
351
352 // go through all threads
353 for (Size tId = 0; tId < tsize; tId++) {
354 if (l_marginalMin_[tId][i][j] < this->marginalMin_[i][j])
355 this->marginalMin_[i][j] = l_marginalMin_[tId][i][j];
356
357 if (l_marginalMax_[tId][i][j] > this->marginalMax_[i][j])
358 this->marginalMax_[i][j] = l_marginalMax_[tId][i][j];
359 } // end of : all threads
360 } // end of : all modalities
361 } // end of : all variables
362 } // end of : parallel region
363 }
364 */
365
366
367 template < typename GUM_SCALAR, class BNInferenceEngine >
368 inline const GUM_SCALAR
370 // compute the number of threads (avoid nested threads)
371 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
372 ? this->threadRanges_.size() - 1
373 : 1; // no nested multithreading
374
375 std::vector< GUM_SCALAR > tEps(nb_threads, 0);
376
377 // create the function to be executed by the threads
378 auto threadedExec = [this, &tEps](const std::size_t this_thread,
379 const std::size_t nb_threads,
380 const std::vector< std::pair< Idx, Idx > >& ranges) {
381 auto& this_tEps = tEps[this_thread];
382 GUM_SCALAR delta = 0;
383
384 auto i = this->threadRanges_[this_thread].first;
385 auto j = this->threadRanges_[this_thread].second;
386 auto domain_size = this->marginalMax_[i].size();
387 const auto end_i = this->threadRanges_[this_thread + 1].first;
388 auto end_j = this->threadRanges_[this_thread + 1].second;
389 const auto marginalMax_size = this->marginalMax_.size();
390
391 while ((i < end_i) || (j < end_j)) {
392 // on min
393 delta = this->marginalMin_[i][j] - this->oldMarginalMin_[i][j];
394 delta = (delta < 0) ? (-delta) : delta;
395 if (this_tEps < delta) this_tEps = delta;
396
397 // on max
398 delta = this->marginalMax_[i][j] - this->oldMarginalMax_[i][j];
399 delta = (delta < 0) ? (-delta) : delta;
400 if (this_tEps < delta) this_tEps = delta;
401
402 this->oldMarginalMin_[i][j] = this->marginalMin_[i][j];
403 this->oldMarginalMax_[i][j] = this->marginalMax_[i][j];
404
405 if (++j == domain_size) {
406 j = 0;
407 ++i;
408 if (i < marginalMax_size) domain_size = this->marginalMax_[i].size();
409 }
410 }
411 };
412
413 // launch the threads
415 nb_threads,
416 threadedExec,
417 (nb_threads == 1)
418 ? std::vector< std::pair< NodeId, Idx > >{{0, 0}, {this->marginalMin_.size(), 0}}
419 : this->threadRanges_);
420
421 // aggregate all the results
422 GUM_SCALAR eps = tEps[0];
423 for (const auto nb: tEps)
424 if (eps < nb) eps = nb;
425
426 return eps;
427 }
428
429 /*
430 // old openMP code :
431 GUM_SCALAR eps = 0;
432 #pragma omp parallel
433 {
434 GUM_SCALAR tEps = 0;
435 GUM_SCALAR delta;
436
437 int tId = threadsOMP::getThreadNumber();
438 long nsize = long(workingSet_[tId]->size());
439
440 #pragma omp for
441 for (long i = 0; i < nsize; i++) {
442 Size dSize = Size(l_marginalMin_[tId][i].size());
443 std::cout << omp_get_num_threads() << std::endl;
444
445 for (Size j = 0; j < dSize; j++) {
446 // on min
447 delta = this->marginalMin_[i][j] - this->oldMarginalMin_[i][j];
448 delta = (delta < 0) ? (-delta) : delta;
449 tEps = (tEps < delta) ? delta : tEps;
450
451 // on max
452 delta = this->marginalMax_[i][j] - this->oldMarginalMax_[i][j];
453 delta = (delta < 0) ? (-delta) : delta;
454 tEps = (tEps < delta) ? delta : tEps;
455
456 this->oldMarginalMin_[i][j] = this->marginalMin_[i][j];
457 this->oldMarginalMax_[i][j] = this->marginalMax_[i][j];
458 }
459 } // end of : all variables
460
461 #pragma omp critical(epsilon_max)
462 {
463 #pragma omp flush(eps)
464 eps = (eps < tEps) ? tEps : eps;
465 }
466
467 } // end of : parallel region
468 return eps;
469 }
470 */
471
472 template < typename GUM_SCALAR, class BNInferenceEngine >
474#pragma omp parallel
475 {
476 int threadId = threadsOMP::getThreadNumber();
477 long nsize = long(workingSet_[threadId]->size());
478
479#pragma omp for
480
481 for (long i = 0; i < nsize; i++) {
482 Size dSize = Size(l_marginalMin_[threadId][i].size());
483
484 for (Size j = 0; j < dSize; j++) {
485 Size tsize = Size(l_marginalMin_.size());
486
487 // go through all threads
488 for (Size tId = 0; tId < tsize; tId++) {
489 if (l_marginalMin_[tId][i][j] < this->oldMarginalMin_[i][j])
490 this->oldMarginalMin_[i][j] = l_marginalMin_[tId][i][j];
491
492 if (l_marginalMax_[tId][i][j] > this->oldMarginalMax_[i][j])
493 this->oldMarginalMax_[i][j] = l_marginalMax_[tId][i][j];
494 } // end of : all threads
495 } // end of : all modalities
496 } // end of : all variables
497 } // end of : parallel region
498 }
499
500 template < typename GUM_SCALAR, class BNInferenceEngine >
502 // don't create threads if there are no vertices saved
503 if (!_infE_::storeVertices_) return;
504
505 // compute the max number of threads to use (avoid nested threads)
506 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
508 : 1; // no nested multithreading
509
510 // create the function to be executed by the threads
511 Size tsize = Size(l_marginalMin_.size());
512 auto threadedExec = [this, tsize](const std::size_t this_thread,
513 const std::size_t nb_threads,
514 Idx work_index,
515 const std::vector< std::pair< Idx, Idx > >& ranges) {
516 for (Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
517 // go through all threads
518 for (Size tId = 0; tId < tsize; ++tId) {
519 auto& nodeThreadCredalSet = l_marginalSets_[tId][i];
520
521 // for each vertex, if we are at any opt marginal, add it to the set
522 for (const auto& vtx: nodeThreadCredalSet) {
523 // we run redundancy elimination at each step because there could
524 // be 100000 threads and the set will be so huge...
525 // BUT not if vertices are of dimension 2 ! opt check and equality
526 // should be enough
527 _infE_::updateCredalSets_(i, vtx, (vtx.size() > 2) ? true : false);
528 } // end of : nodeThreadCredalSet
529 } // end of : all threads
530 } // end of : all variables
531 };
532
533 const Size working_size = workingSet_.size();
534 for (Idx work_index = 0; work_index < working_size; ++work_index) {
535 // compute the ranges over which the threads will work
536 const auto nsize = workingSet_[work_index]->size();
537 const auto real_nb_threads = std::min(nb_threads, nsize);
538 const auto ranges = gum::dispatchRangeToThreads(0, nsize, (unsigned int)(real_nb_threads));
539 ThreadExecutor::execute(real_nb_threads, threadedExec, work_index, ranges);
540 }
541 }
542
543 /*
544 // old openMP code :
545 #pragma omp parallel
546 {
547 int threadId = threadsOMP::getThreadNumber();
548 Size nsize = Size(workingSet_[threadId]->size());
549
550 #pragma omp for
551 for (long i = 0; i < long(nsize); i++) {
552 Size tsize = Size(l_marginalMin_.size());
553
554 // go through all threads
555 for (long tId = 0; tId < long(tsize); tId++) {
556 auto& nodeThreadCredalSet = l_marginalSets_[tId][i];
557
558 // for each vertex, if we are at any opt marginal, add it to the set
559 for (const auto& vtx: nodeThreadCredalSet) {
560 // we run redundancy elimination at each step
561 // because there could be 100000 threads and the set will be so
562 // huge
563 // ...
564 // BUT not if vertices are of dimension 2 ! opt check and equality
565 // should be enough
566 _infE_::updateCredalSets_(i, vtx, (vtx.size() > 2) ? true : false);
567 } // end of : nodeThreadCredalSet
568 } // end of : all threads
569 } // end of : all variables
570 } // end of : parallel region
571 }
572 */
573
574
575 template < typename GUM_SCALAR, class BNInferenceEngine >
577 // don't create threads if there are no modalities to compute expectations
578 if (this->modal_.empty()) return;
579
580 // compute the max number of threads to use (avoid nested threads)
581 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
583 : 1; // no nested multithreading
584
585 // we can compute expectations from vertices of the final credal set
587 // create the function to be executed by the threads
588 auto threadedExec = [this](const std::size_t this_thread,
589 const std::size_t nb_threads,
590 Idx work_index,
591 const std::vector< std::pair< Idx, Idx > >& ranges) {
592 for (Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
593 std::string var_name = workingSet_[work_index]->variable(i).name();
594 auto delim = var_name.find_first_of("_");
595 var_name = var_name.substr(0, delim);
596
597 if (!l_modal_[work_index].exists(var_name)) continue;
598
599 for (const auto& vertex: _infE_::marginalSets_[i]) {
600 GUM_SCALAR exp = 0;
601 Size vsize = Size(vertex.size());
602
603 for (Size mod = 0; mod < vsize; mod++)
604 exp += vertex[mod] * l_modal_[work_index][var_name][mod];
605
607
609 }
610 }
611 };
612
613 const Size working_size = workingSet_.size();
614 for (Idx work_index = 0; work_index < working_size; ++work_index) {
615 if (!this->l_modal_[work_index].empty()) {
616 // compute the ranges over which the threads will work
617 const auto nsize = workingSet_[work_index]->size();
618 const auto real_nb_threads = std::min(nb_threads, nsize);
619 const auto ranges
620 = gum::dispatchRangeToThreads(0, nsize, (unsigned int)(real_nb_threads));
621 ThreadExecutor::execute(real_nb_threads, threadedExec, work_index, ranges);
622 }
623 }
624
625 return;
626 }
627
628 // create the function to be executed by the threads
629 auto threadedExec = [this](const std::size_t this_thread,
630 const std::size_t nb_threads,
631 Idx work_index,
632 const std::vector< std::pair< Idx, Idx > >& ranges) {
633 for (Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
634 std::string var_name = workingSet_[work_index]->variable(i).name();
635 auto delim = var_name.find_first_of("_");
636 var_name = var_name.substr(0, delim);
637
638 if (!l_modal_[work_index].exists(var_name)) continue;
639
640 Size tsize = Size(l_expectationMax_.size());
641
642 for (Idx tId = 0; tId < tsize; tId++) {
643 if (l_expectationMax_[tId][i] > this->expectationMax_[i])
644 this->expectationMax_[i] = l_expectationMax_[tId][i];
645
646 if (l_expectationMin_[tId][i] < this->expectationMin_[i])
647 this->expectationMin_[i] = l_expectationMin_[tId][i];
648 } // end of : each thread
649 } // end of : each variable
650 };
651
652 const Size working_size = workingSet_.size();
653 for (Idx work_index = 0; work_index < working_size; ++work_index) {
654 if (!this->l_modal_[work_index].empty()) {
655 const auto nsize = Size(workingSet_[work_index]->size());
656 const auto real_nb_threads = std::min(nb_threads, nsize);
657 const auto ranges
658 = gum::dispatchRangeToThreads(0, nsize, (unsigned int)(real_nb_threads));
659 ThreadExecutor::execute(real_nb_threads, threadedExec, work_index, ranges);
660 }
661 }
662 }
663
664 /*
665 // old openMP code :
666 // don't create threads if there are no modalities to compute expectations
667 if (this->modal_.empty()) return;
668
669 // we can compute expectations from vertices of the final credal set
670 if (_infE_::storeVertices_) {
671 #pragma omp parallel
672 {
673 int threadId = threadsOMP::getThreadNumber();
674
675 if (!this->l_modal_[threadId].empty()) {
676 Size nsize = Size(workingSet_[threadId]->size());
677
678 #pragma omp for
679 for (long i = 0; i < long(nsize); i++) { // i needs to be signed (due to omp with
680 // visual c++ 15)
681 std::string var_name = workingSet_[threadId]->variable(i).name();
682 auto delim = var_name.find_first_of("_");
683 var_name = var_name.substr(0, delim);
684
685 if (!l_modal_[threadId].exists(var_name)) continue;
686
687 for (const auto& vertex: _infE_::marginalSets_[i]) {
688 GUM_SCALAR exp = 0;
689 Size vsize = Size(vertex.size());
690
691 for (Size mod = 0; mod < vsize; mod++)
692 exp += vertex[mod] * l_modal_[threadId][var_name][mod];
693
694 if (exp > _infE_::expectationMax_[i]) _infE_::expectationMax_[i] = exp;
695
696 if (exp < _infE_::expectationMin_[i]) _infE_::expectationMin_[i] = exp;
697 }
698 } // end of : each variable parallel for
699 } // end of : if this thread has modals
700 } // end of parallel region
701 return;
702 }
703
704 #pragma omp parallel
705 {
706 int threadId = threadsOMP::getThreadNumber();
707
708 if (!this->l_modal_[threadId].empty()) {
709 Size nsize = Size(workingSet_[threadId]->size());
710 #pragma omp for
711 for (long i = 0; i < long(nsize);
712 i++) { // long instead of Idx due to omp for visual C++15
713 std::string var_name = workingSet_[threadId]->variable(i).name();
714 auto delim = var_name.find_first_of("_");
715 var_name = var_name.substr(0, delim);
716
717 if (!l_modal_[threadId].exists(var_name)) continue;
718
719 Size tsize = Size(l_expectationMax_.size());
720
721 for (Idx tId = 0; tId < tsize; tId++) {
722 if (l_expectationMax_[tId][i] > this->expectationMax_[i])
723 this->expectationMax_[i] = l_expectationMax_[tId][i];
724
725 if (l_expectationMin_[tId][i] < this->expectationMin_[i])
726 this->expectationMin_[i] = l_expectationMin_[tId][i];
727 } // end of : each thread
728 } // end of : each variable
729 } // end of : if modals not empty
730 } // end of : parallel region
731 }
732 */
733
734 template < typename GUM_SCALAR, class BNInferenceEngine >
736 using dBN = std::vector< bool >;
737
738 Size nsize = Size(workingSet_[0]->size());
739
740 // no parallel insert in hash-tables (OptBN)
741 for (Idx i = 0; i < nsize; i++) {
742 // we don't store anything for observed variables
743 if (_infE_::evidence_.exists(i)) continue;
744
745 Size dSize = Size(l_marginalMin_[0][i].size());
746
747 for (Size j = 0; j < dSize; j++) {
748 // go through all threads
749 std::vector< Size > keymin(3);
750 keymin[0] = i;
751 keymin[1] = j;
752 keymin[2] = 0;
753 std::vector< Size > keymax(keymin);
754 keymax[2] = 1;
755
756 Size tsize = Size(l_marginalMin_.size());
757
758 for (Size tId = 0; tId < tsize; tId++) {
759 if (l_marginalMin_[tId][i][j] == this->marginalMin_[i][j]) {
760 const std::vector< dBN* >& tOpts = l_optimalNet_[tId]->getBNOptsFromKey(keymin);
761 Size osize = Size(tOpts.size());
762
763 for (Size bn = 0; bn < osize; bn++) {
764 _infE_::dbnOpt_.insert(*tOpts[bn], keymin);
765 }
766 }
767
768 if (l_marginalMax_[tId][i][j] == this->marginalMax_[i][j]) {
769 const std::vector< dBN* >& tOpts = l_optimalNet_[tId]->getBNOptsFromKey(keymax);
770 Size osize = Size(tOpts.size());
771
772 for (Size bn = 0; bn < osize; bn++) {
773 _infE_::dbnOpt_.insert(*tOpts[bn], keymax);
774 }
775 }
776 } // end of : all threads
777 } // end of : all modalities
778 } // end of : all variables
779 }
780
781 template < typename GUM_SCALAR, class BNInferenceEngine >
784 Size tsize = Size(workingSet_.size());
785
786 // delete pointers
787 for (Size bn = 0; bn < tsize; bn++) {
789
790 if (workingSet_[bn] != nullptr) delete workingSet_[bn];
791
793 if (l_inferenceEngine_[bn] != nullptr) delete l_optimalNet_[bn];
794
795 if (this->workingSetE_[bn] != nullptr) {
796 for (const auto ev: *workingSetE_[bn])
797 delete ev;
798
799 delete workingSetE_[bn];
800 }
801
802 if (l_inferenceEngine_[bn] != nullptr) delete l_inferenceEngine_[bn];
803 }
804
805 // this is important, those will be resized with the correct number of
806 // threads.
807
808 workingSet_.clear();
809 workingSetE_.clear();
810 l_inferenceEngine_.clear();
811 l_optimalNet_.clear();
812
813 l_marginalMin_.clear();
814 l_marginalMax_.clear();
815 l_expectationMin_.clear();
816 l_expectationMax_.clear();
817 l_modal_.clear();
818 l_marginalSets_.clear();
819 l_evidence_.clear();
820 l_clusters_.clear();
821 }
822
823 } // namespace credal
824} // namespace gum
virtual Size getNumberOfThreads() const
returns the current max number of threads used by the class containing this ThreadNumberManager
Class template representing a Credal Network.
Definition credalNet.h:97
margi oldMarginalMax_
Old upper marginals used to compute epsilon.
margi evidence_
Holds observed variables states.
bool storeBNOpt_
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
margi marginalMax_
Upper marginals.
void updateCredalSets_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of it's possible vertex, update it's credal set.
InferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
margi oldMarginalMin_
Old lower marginals used to compute epsilon.
bool storeVertices_
True if credal sets vertices are stored, False otherwise.
virtual void eraseAllEvidence()
removes all the evidence entered into the network
expe expectationMax_
Upper expectations, if some variables modalities were inserted.
credalSet marginalSets_
Credal sets vertices, if enabled.
const CredalNet< GUM_SCALAR > & credalNet() const
Get this credal network.
margi marginalMin_
Lower marginals.
dynExpe modal_
Variables modalities used to compute expectations.
expe expectationMin_
Lower expectations, if some variables modalities were inserted.
std::vector< std::pair< NodeId, Idx > > threadRanges_
the ranges of elements of marginalMin_ and marginalMax_ processed by each thread
VarMod2BNsMap< GUM_SCALAR > dbnOpt_
Object used to efficiently store optimal bayes net during inference, for some algorithms.
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
Definition LrsWrapper.h:119
const matrix & getOutput() const
Get the output matrix solution of the problem.
void setUpV(const Size &card, const Size &vertices)
Sets up a V-representation.
void elimRedundVrep()
V-Redundancy elimination.
void fillV(const std::vector< GUM_SCALAR > &vertex)
Creates the V-representation of a polytope by adding a vertex to the problem input _input_.
void updateMarginals_()
Fusion of threads marginals.
_margis_ l_marginalMin_
Threads lower marginals, one per thread.
_expes_ l_expectationMax_
Threads upper expectations, one per thread.
_expes_ l_expectationMin_
Threads lower expectations, one per thread.
std::vector< _bnet_ * > workingSet_
Threads IBayesNet.
std::vector< BNInferenceEngine * > l_inferenceEngine_
Threads BNInferenceEngine.
_credalSets_ l_marginalSets_
Threads vertices.
void optFusion_()
Fusion of threads optimal IBayesNet.
std::vector< VarMod2BNsMap< GUM_SCALAR > * > l_optimalNet_
Threads optimal IBayesNet.
void updateOldMarginals_()
Update old marginals (from current marginals).
_margis_ l_marginalMax_
Threads upper marginals, one per thread.
MultipleInferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void expFusion_()
Fusion of threads expectations.
void initThreadsData_(const Size &num_threads, const bool _storeVertices_, const bool _storeBNOpt_)
Initialize threads data.
const GUM_SCALAR computeEpsilon_()
Compute epsilon and update old marginals.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
bool updateThread_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Update thread information (marginals, expectations, IBayesNet, vertices) for a given node id.
std::vector< List< const Tensor< GUM_SCALAR > * > * > workingSetE_
Threads evidence.
std::vector< std::mt19937 > generators_
the generators used for computing random values
void _updateThreadCredalSets_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund)
Ask for redundancy elimination of a node credal set of a calling thread.
unsigned int getThreadNumber()
Get the calling thread id.
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition types.h:74
Size Idx
Type for indexes.
Definition types.h:79
Size NodeId
Type for node ids.
unsigned int currentRandomGeneratorValue()
returns the current generator's value
Abstract class representing CredalNet inference engines.
Abstract class representing CredalNet inference engines.
namespace for all credal networks entities
Definition agrum.h:61
gum is the global namespace for all aGrUM entities
Definition agrum.h:46
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
Definition threads.cpp:76
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running