aGrUM 2.3.2
a C++ library for (probabilistic) graphical models
CNMonteCarloSampling_tpl.h
Go to the documentation of this file.
1/****************************************************************************
2 * This file is part of the aGrUM/pyAgrum library. *
3 * *
4 * Copyright (c) 2005-2025 by *
5 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
6 * - Christophe GONZALES(_at_AMU) *
7 * *
8 * The aGrUM/pyAgrum library is free software; you can redistribute it *
9 * and/or modify it under the terms of either : *
10 * *
11 * - the GNU Lesser General Public License as published by *
12 * the Free Software Foundation, either version 3 of the License, *
13 * or (at your option) any later version, *
14 * - the MIT license (MIT), *
15 * - or both in dual license, as here. *
16 * *
17 * (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) *
18 * *
19 * This aGrUM/pyAgrum library is distributed in the hope that it will be *
20 * useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, *
21 * INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS *
22 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, *
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR *
26 * OTHER DEALINGS IN THE SOFTWARE. *
27 * *
28 * See LICENCES for more details. *
29 * *
30 * SPDX-FileCopyrightText: Copyright 2005-2025 *
31 * - Pierre-Henri WUILLEMIN(_at_LIP6) *
32 * - Christophe GONZALES(_at_AMU) *
33 * SPDX-License-Identifier: LGPL-3.0-or-later OR MIT *
34 * *
35 * Contact : info_at_agrum_dot_org *
36 * homepage : http://agrum.gitlab.io *
37 * gitlab : https://gitlab.com/agrumery/agrum *
38 * *
39 ****************************************************************************/
40#pragma once
41
42
45
46namespace gum::credal {
47
48 template < typename GUM_SCALAR, class BNInferenceEngine >
63
64 template < typename GUM_SCALAR, class BNInferenceEngine >
68
// ------------------------------------------------------------------------
// makeInference(): entry point of the credal-network Monte-Carlo sampling
// algorithm.  Runs periods of multi-threaded (sample + exact inference)
// iterations until the approximation scheme's stopping rule fires, then
// fuses the per-thread results.
// NOTE(review): this listing is a partially garbled extraction -- the
// enclosing signature line and a few interior lines (doxygen lines 70-71,
// 76, 79-80, 126) are missing from this view; surviving lines are kept
// verbatim.
// ------------------------------------------------------------------------
69 template < typename GUM_SCALAR, class BNInferenceEngine >
// try to set up repetitive independence (dynamic networks only); on
// failure the error is reported and inference proceeds without it
72 try {
73 this->repetitiveInit_();
74 } catch (InvalidArgument const& err) {
75 GUM_SHOWERROR(err)
77 }
78 }
81
82 // don't put it after burnIn, it could stop with timeout : we want at
83 // least one burnIn and one periodSize
84 GUM_SCALAR eps = 1.; // to validate testSuite ?
85
// number of samples drawn per approximation-scheme period
86 auto psize = this->periodSize();
87
88 if (this->continueApproximationScheme(eps)) {
89 // compute the number of threads to use
90 const Size nb_threads = ThreadExecutor::nbRunningThreadsExecutors() == 0
91 ? this->getNumberOfThreads()
92 : 1; // no nested multithreading
93
94 // dispatch {0,...,psize} among the threads
95 const auto ranges = gum::dispatchRangeToThreads(0, psize, (unsigned int)(nb_threads));
96
97 // create the function to be executed by the threads
98 auto threadedExec
99 = [this, ranges](const std::size_t this_thread, const std::size_t nb_threads) {
100 const auto& this_range = ranges[this_thread];
// each iteration draws one BN sample + exact inference
// (_threadInference_), then folds the posteriors into the
// thread-local credal bounds (_threadUpdate_)
101 for (Idx j = this_range.first; j < this_range.second; ++j) {
102 _threadInference_(this_thread);
103 _threadUpdate_(this_thread);
104 }
105 };
106
// one approximation-scheme period per do-while iteration
107 do {
108 eps = 0;
109
110 // launch the threads
111 ThreadExecutor::execute(nb_threads, threadedExec);
112
113 this->updateApproximationScheme(int(psize));
114
115 this->updateMarginals_(); // fusion of the threads + update of the global marginals
116
117 eps = this->computeEpsilon_(); // also updates the old marginals
118
119 } while (this->continueApproximationScheme(eps));
120 }
121
// final fusions of the per-thread results
122 if (!this->modal_.empty()) { this->expFusion_(); }
123
124 if (_infEs_::storeBNOpt_) { this->optFusion_(); }
125
127
128 if (!this->modal_.empty()) {
129 this->dynamicExpectations_(); // work with any network
130 }
131 }
132
133 template < typename GUM_SCALAR, class BNInferenceEngine >
// _threadUpdate_(tId): folds the posteriors computed by thread tId's
// inference engine into that thread's local credal bounds.  Samples with
// zero evidence probability are discarded.
// (The signature line -- doxygen line 134 -- is missing from this
// extraction; per the file's index it is:
//  void _threadUpdate_(Size this_thread).)
135 if (this->l_inferenceEngine_[tId]->evidenceProbability() > 0) {
136 const DAG& tDag = this->workingSet_[tId]->dag();
137
138 for (auto node: tDag) {
// flatten the node's posterior tensor into a plain vector: one
// candidate vertex of the node's credal set
139 const Tensor< GUM_SCALAR >& tensor(this->l_inferenceEngine_[tId]->posterior(node));
140 Instantiation ins(tensor);
141 std::vector< GUM_SCALAR > vertex;
142
143 for (ins.setFirst(); !ins.end(); ++ins) {
144 vertex.push_back(tensor[ins]);
145 }
146
147 // `true` would request redundancy elimination on the node's credal
148 // set, but since global marginals are only updated at the end of each
149 // approximationScheme period, it is "useless" ( and expensive ) to check now
150 this->updateThread_(tId, node, vertex, false);
151 }
152 }
153 }
154
155 template < typename GUM_SCALAR, class BNInferenceEngine >
// _threadInference_(tId): one sampling + inference pass for thread tId.
// (Doxygen lines 156-157, carrying the signature, are missing from this
// extraction; per the file's index it is:
//  void _threadInference_(Size this_thread).
// NOTE(review): the missing line presumably also calls
// _verticesSampling_(tId) to draw the BN sample -- confirm against the
// original source.)
158
// clear stale evidence, re-insert the credal-net evidence, then run the
// exact inference engine on the freshly sampled working BN
159 this->l_inferenceEngine_[tId]->eraseAllEvidence();
160 _insertEvidence_(tId);
161 this->l_inferenceEngine_[tId]->makeInference();
162 }
163
164 template < typename GUM_SCALAR, class BNInferenceEngine >
174
175 template < typename GUM_SCALAR, class BNInferenceEngine >
// _mcThreadDataCopy_(): allocates and seeds the per-thread working data
// (working BNs, local marginals/expectations, cluster copies, evidence
// lists and inference engines).
// (The signature -- doxygen line 176 -- and doxygen lines 178, 205 and
// 210 are missing from this extraction; per the file's index it is:
//  void _mcThreadDataCopy_().)
177 auto num_threads = this->getNumberOfThreads();
179 this->l_inferenceEngine_.resize(num_threads, nullptr);
180
181 // create the BNs: do this in a single thread because Bayes Nets do not
182 // support slaves in multi threading
183 for (auto& thread_bn: this->workingSet_)
184 thread_bn = new BayesNet< GUM_SCALAR >(this->credalNet_->current_bn());
185
186 // create the function to be executed by the threads
187 auto threadedExec = [this](const std::size_t this_thread, const std::size_t nb_threads) {
// seed the thread-local state from the current global state
188 this->l_marginalMin_[this_thread] = this->marginalMin_;
189 this->l_marginalMax_[this_thread] = this->marginalMax_;
190 this->l_expectationMin_[this_thread] = this->expectationMin_;
191 this->l_expectationMax_[this_thread] = this->expectationMax_;
192 this->l_modal_[this_thread] = this->modal_;
193
// per-thread copy of the dynamic-network clusters t0 / t1
194 _infEs_::l_clusters_[this_thread].resize(2);
195 _infEs_::l_clusters_[this_thread][0] = _infEs_::t0_;
196 _infEs_::l_clusters_[this_thread][1] = _infEs_::t1_;
197
198 if (_infEs_::storeVertices_) { this->l_marginalSets_[this_thread] = this->marginalSets_; }
199
// fresh (empty) evidence-tensor cache, filled lazily by _insertEvidence_
200 this->workingSetE_[this_thread] = new List< const Tensor< GUM_SCALAR >* >();
201
202 // #TODO: the next instruction works only for lazy propagation.
203 // => find a way to remove the second argument
// NOTE(review): the second constructor argument (doxygen line 205) is
// missing from this extraction.
204 auto inference_engine = new BNInferenceEngine(this->workingSet_[this_thread],
// each engine runs single-threaded: parallelism is across samples
206 inference_engine->setNumberOfThreads(1);
207
208 this->l_inferenceEngine_[this_thread] = inference_engine;
209
// NOTE(review): the guard opening this scope (doxygen line 210,
// presumably `if (_infEs_::storeBNOpt_) {`, matching the `}` below) is
// missing from this extraction -- confirm against the original source.
211 this->l_optimalNet_[this_thread] = new VarMod2BNsMap< GUM_SCALAR >(*this->credalNet_);
212 }
213 };
214
215 // launch the threads
216 ThreadExecutor::execute(num_threads, threadedExec);
217 }
218
219 template < typename GUM_SCALAR, class BNInferenceEngine >
// _binaryRep_(toFill, value): writes the binary representation of
// `value` into `toFill`, least-significant bit first; only the first
// toFill.size() bits are kept (higher bits are silently truncated).
// (The line carrying the function name -- doxygen line 220 -- is missing
// from this extraction; per the file's index it is:
//  void _binaryRep_(std::vector<bool>& toFill, const Idx value) const.)
221 std::vector< bool >& toFill,
222 const Idx value) const {
223 Idx n = value;
224 auto tfsize = toFill.size();
225
226 // extract the bits of choosen_vertex, LSB first
227 for (decltype(tfsize) i = 0; i < tfsize; i++) {
228 toFill[i] = n & 1;
229 n /= 2;
230 }
231 }
232
233 template < typename GUM_SCALAR, class BNInferenceEngine >
234 inline void
// _verticesSampling_(this_thread): samples a precise BayesNet from the
// credal network by picking, for every variable and every parent
// configuration, one random vertex of the local credal set, and writes
// the resulting CPTs into this thread's working BN.
// (The signature -- doxygen line 235 -- is missing from this extraction;
// per the file's index it is: void _verticesSampling_(Size this_thread).)
236 IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];
237 auto& random_generator = this->generators_[this_thread];
238 const auto cpt = &this->credalNet_->credalNet_currentCpt();
239
// dBN: per-node / per-parent-configuration bit patterns recording which
// vertex was chosen (used when optimal BNs are tracked)
240 using dBN = std::vector< std::vector< std::vector< bool > > >;
241
242 dBN sample;
243
244 if (_infEs_::storeBNOpt_) { sample = dBN(this->l_optimalNet_[this_thread]->getSampleDef()); }
245
// NOTE(review): the branch opener (doxygen line 246, presumably
// `if (_infEs_::repetitiveInd_) {`, matching the `} else {` below) is
// missing from this extraction -- confirm against the original source.
// Repetitive-independence case: sample once per cluster representative
// (t0/t1) and copy the sampled CPT to every node of the cluster.
247 const auto& t0 = _infEs_::l_clusters_[this_thread][0];
248 const auto& t1 = _infEs_::l_clusters_[this_thread][1];
249
250 for (const auto& elt: t0) {
251 auto dSize = working_bn->variable(elt.first).domainSize();
252 Tensor< GUM_SCALAR >* tensor(
253 const_cast< Tensor< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
254 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
255
256 Size pconfs = Size((*cpt)[elt.first].size());
257
// pick one random vertex of the credal set per parent configuration
258 for (Size pconf = 0; pconf < pconfs; pconf++) {
// NOTE(review): unlike the t1 and non-repetitive branches below, this
// call does not pass the per-thread random_generator -- possible
// thread-safety / reproducibility inconsistency; confirm against the
// original source.
259 Size choosen_vertex = randomValue((*cpt)[elt.first][pconf].size());
260
261 if (_infEs_::storeBNOpt_) { _binaryRep_(sample[elt.first][pconf], choosen_vertex); }
262
// copy the chosen vertex's distribution into the flat CPT buffer
263 for (Size mod = 0; mod < dSize; mod++) {
264 var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][choosen_vertex][mod];
265 }
266 } // end of : pconf
267
268 tensor->fillWith(var_cpt);
269
270 Size t0esize = Size(elt.second.size());
271
// propagate the representative's sampled CPT to the other cluster nodes
272 for (Size pos = 0; pos < t0esize; pos++) {
273 if (_infEs_::storeBNOpt_) { sample[elt.second[pos]] = sample[elt.first]; }
274
275 Tensor< GUM_SCALAR >* tensor2(
276 const_cast< Tensor< GUM_SCALAR >* >(&working_bn->cpt(elt.second[pos])));
277 tensor2->fillWith(var_cpt);
278 }
279 }
280
// same scheme for the second cluster family (t1)
281 for (const auto& elt: t1) {
282 auto dSize = working_bn->variable(elt.first).domainSize();
283 Tensor< GUM_SCALAR >* tensor(
284 const_cast< Tensor< GUM_SCALAR >* >(&working_bn->cpt(elt.first)));
285 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
286
287 for (Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
288 Idx choosen_vertex = randomValue(random_generator, (*cpt)[elt.first][pconf].size());
289
290 if (_infEs_::storeBNOpt_) { _binaryRep_(sample[elt.first][pconf], choosen_vertex); }
291
292 for (decltype(dSize) mod = 0; mod < dSize; mod++) {
293 var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][choosen_vertex][mod];
294 }
295 } // end of : pconf
296
297 tensor->fillWith(var_cpt);
298
299 auto t1esize = elt.second.size();
300
// propagate to the rest of the cluster, as above
301 for (decltype(t1esize) pos = 0; pos < t1esize; pos++) {
302 if (_infEs_::storeBNOpt_) { sample[elt.second[pos]] = sample[elt.first]; }
303
304 Tensor< GUM_SCALAR >* tensor2(
305 const_cast< Tensor< GUM_SCALAR >* >(&working_bn->cpt(elt.second[pos])));
306 tensor2->fillWith(var_cpt);
307 }
308 }
309
310 if (_infEs_::storeBNOpt_) { this->l_optimalNet_[this_thread]->setCurrentSample(sample); }
311 } else {
// general (non-repetitive) case: sample every node's CPT independently
312 for (auto node: working_bn->nodes()) {
313 auto dSize = working_bn->variable(node).domainSize();
314 Tensor< GUM_SCALAR >* tensor(const_cast< Tensor< GUM_SCALAR >* >(&working_bn->cpt(node)));
315 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
316
317 auto pConfs = (*cpt)[node].size();
318
319 for (decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
320 Size nVertices = Size((*cpt)[node][pconf].size());
321 Idx choosen_vertex = randomValue(random_generator, nVertices);
322
323 if (_infEs_::storeBNOpt_) { _binaryRep_(sample[node][pconf], choosen_vertex); }
324
325 for (decltype(dSize) mod = 0; mod < dSize; mod++) {
326 var_cpt[pconf * dSize + mod] = (*cpt)[node][pconf][choosen_vertex][mod];
327 }
328 } // end of : pconf
329
330 tensor->fillWith(var_cpt);
331 }
332
333 if (_infEs_::storeBNOpt_) { this->l_optimalNet_[this_thread]->setCurrentSample(sample); }
334 }
335 }
336
337 template < typename GUM_SCALAR, class BNInferenceEngine >
338 inline void
// _insertEvidence_(this_thread): pushes the credal network's evidence
// into this thread's inference engine; the evidence tensors are built
// once and cached in workingSetE_[this_thread] for later calls.
// (The signature -- doxygen line 339 -- is missing from this extraction;
// per the file's index it is: void _insertEvidence_(Size this_thread).)
340 if (this->evidence_.size() == 0) { return; }
341
342 BNInferenceEngine* inference_engine = this->l_inferenceEngine_[this_thread];
343
344 IBayesNet< GUM_SCALAR >* working_bn = this->workingSet_[this_thread];
345
// per-thread cache of evidence tensors
346 List< const Tensor< GUM_SCALAR >* >* evi_list = this->workingSetE_[this_thread];
347
// fast path: tensors already built on a previous call -- just add them
348 if (evi_list->size() > 0) {
349 for (const auto pot: *evi_list)
350 inference_engine->addEvidence(*pot);
351 return;
352 }
353
// first call: build one single-variable tensor per evidenced node
354 for (const auto& elt: this->evidence_) {
355 auto p = new Tensor< GUM_SCALAR >;
356 (*p) << working_bn->variable(elt.first);
357
358 try {
359 p->fillWith(elt.second);
360 } catch (Exception const& err) {
// report, then rethrow: a malformed evidence vector is fatal here
361 GUM_SHOWERROR(err)
362 throw;
363 }
364
365 evi_list->insert(p);
366 }
367
368 if (evi_list->size() > 0) {
369 for (const auto pot: *evi_list)
370 inference_engine->addEvidence(*pot);
371 }
372 }
373
374} // namespace gum::credal
Inference by a basic sampling algorithm (purely random) over the Bayesian networks of a credal network.
Base class for dag.
Definition DAG.h:121
const NodeGraphPart & nodes() const final
Returns a constant reference to the dag of this Bayes Net.
virtual Size domainSize() const =0
Base class for all aGrUM's exceptions.
Definition exceptions.h:118
Class representing the minimal interface for Bayesian network with no numerical data.
Definition IBayesNet.h:75
virtual const DiscreteVariable & variable(NodeId id) const =0
Returns a constant reference over a variable given it's node id.
virtual const Tensor< GUM_SCALAR > & cpt(NodeId varId) const =0
Returns the CPT of a variable.
Class for assigning/browsing values to tuples of discrete variables.
bool end() const
Returns true if the Instantiation reached the end.
void setFirst()
Assign the first values to the tuple of the Instantiation.
Exception: at least one argument passed to a function is not what was expected.
Generic doubly linked lists.
Definition list.h:379
Size size() const noexcept
Returns the number of elements in the list.
Definition list_tpl.h:1719
Val & insert(const Val &val)
Inserts a new element at the end of the chained list (alias of pushBack).
Definition list_tpl.h:1515
void makeInference()
Starts the inference.
void _verticesSampling_(Size this_thread)
Thread samples a IBayesNet from the CredalNet.
CNMonteCarloSampling(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void _binaryRep_(std::vector< bool > &toFill, const Idx value) const
Get the binary representation of a given value.
void _threadInference_(Size this_thread)
Thread performs an inference using BNInferenceEngine.
void _insertEvidence_(Size this_thread)
Insert CredalNet evidence into a thread BNInferenceEngine.
void _mcInitApproximationScheme_()
Initialize approximation Scheme.
void _mcThreadDataCopy_()
Initialize threads data.
void _threadUpdate_(Size this_thread)
Update thread data after a IBayesNet inference.
Class template representing a Credal Network.
Definition credalNet.h:97
cluster t1_
Clusters of nodes used with dynamic networks.
bool storeBNOpt_
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
bool repetitiveInd_
True if using repetitive independence ( dynamic network only ), False otherwise.
bool storeVertices_
True if credal sets vertices are stored, False otherwise.
cluster t0_
Clusters of nodes used with dynamic networks.
void initThreadsData_(const Size &num_threads, const bool _storeVertices_, const bool _storeBNOpt_)
bool updateThread_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
aGrUM's exceptions
#define GUM_SHOWERROR(e)
Definition exceptions.h:85
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Definition types.h:74
Size Idx
Type for indexes.
Definition types.h:79
Idx randomValue(const Size max=2)
Returns a random Idx between 0 and max-1, inclusive.
namespace for all credal networks entities
Definition agrum.h:61
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
Definition threads.cpp:76
unsigned int getNumberOfThreads()
returns the max number of threads used by default when entering the next parallel region
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running