48 template <
typename GUM_SCALAR,
class BNInferenceEngine >
64 template <
typename GUM_SCALAR,
class BNInferenceEngine >
69 template <
typename GUM_SCALAR,
class BNInferenceEngine >
99 = [
this, ranges](
const std::size_t this_thread,
const std::size_t nb_threads) {
100 const auto& this_range = ranges[this_thread];
101 for (
Idx j = this_range.first; j < this_range.second; ++j) {
128 if (!this->
modal_.empty()) {
133 template <
typename GUM_SCALAR,
class BNInferenceEngine >
138 for (
auto node: tDag) {
141 std::vector< GUM_SCALAR > vertex;
144 vertex.push_back(tensor[ins]);
155 template <
typename GUM_SCALAR,
class BNInferenceEngine >
164 template <
typename GUM_SCALAR,
class BNInferenceEngine >
175 template <
typename GUM_SCALAR,
class BNInferenceEngine >
184 thread_bn =
new BayesNet< GUM_SCALAR >(this->
credalNet_->current_bn());
187 auto threadedExec = [
this](
const std::size_t this_thread,
const std::size_t nb_threads) {
204 auto inference_engine =
new BNInferenceEngine(this->workingSet_[this_thread],
206 inference_engine->setNumberOfThreads(1);
219 template <
typename GUM_SCALAR,
class BNInferenceEngine >
221 std::vector< bool >& toFill,
222 const Idx value)
const {
224 auto tfsize = toFill.size();
227 for (
decltype(tfsize) i = 0; i < tfsize; i++) {
233 template <
typename GUM_SCALAR,
class BNInferenceEngine >
237 auto& random_generator = this->
generators_[this_thread];
238 const auto cpt = &this->
credalNet_->credalNet_currentCpt();
240 using dBN = std::vector< std::vector< std::vector< bool > > >;
250 for (
const auto& elt: t0) {
252 Tensor< GUM_SCALAR >* tensor(
253 const_cast< Tensor< GUM_SCALAR >*
>(&working_bn->
cpt(elt.first)));
254 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
256 Size pconfs =
Size((*cpt)[elt.first].size());
258 for (
Size pconf = 0; pconf < pconfs; pconf++) {
263 for (
Size mod = 0; mod < dSize; mod++) {
264 var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][choosen_vertex][mod];
268 tensor->fillWith(var_cpt);
270 Size t0esize =
Size(elt.second.size());
272 for (
Size pos = 0; pos < t0esize; pos++) {
275 Tensor< GUM_SCALAR >* tensor2(
276 const_cast< Tensor< GUM_SCALAR >*
>(&working_bn->
cpt(elt.second[pos])));
277 tensor2->fillWith(var_cpt);
281 for (
const auto& elt: t1) {
283 Tensor< GUM_SCALAR >* tensor(
284 const_cast< Tensor< GUM_SCALAR >*
>(&working_bn->
cpt(elt.first)));
285 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
287 for (
Size pconf = 0; pconf < (*cpt)[elt.first].size(); pconf++) {
288 Idx choosen_vertex =
randomValue(random_generator, (*cpt)[elt.first][pconf].size());
292 for (
decltype(dSize) mod = 0; mod < dSize; mod++) {
293 var_cpt[pconf * dSize + mod] = (*cpt)[elt.first][pconf][choosen_vertex][mod];
297 tensor->fillWith(var_cpt);
299 auto t1esize = elt.second.size();
301 for (
decltype(t1esize) pos = 0; pos < t1esize; pos++) {
304 Tensor< GUM_SCALAR >* tensor2(
305 const_cast< Tensor< GUM_SCALAR >*
>(&working_bn->
cpt(elt.second[pos])));
306 tensor2->fillWith(var_cpt);
312 for (
auto node: working_bn->
nodes()) {
314 Tensor< GUM_SCALAR >* tensor(
const_cast< Tensor< GUM_SCALAR >*
>(&working_bn->
cpt(node)));
315 std::vector< GUM_SCALAR > var_cpt(tensor->domainSize());
317 auto pConfs = (*cpt)[node].size();
319 for (
decltype(pConfs) pconf = 0; pconf < pConfs; pconf++) {
320 Size nVertices =
Size((*cpt)[node][pconf].size());
325 for (
decltype(dSize) mod = 0; mod < dSize; mod++) {
326 var_cpt[pconf * dSize + mod] = (*cpt)[node][pconf][choosen_vertex][mod];
330 tensor->fillWith(var_cpt);
337 template <
typename GUM_SCALAR,
class BNInferenceEngine >
340 if (this->
evidence_.size() == 0) {
return; }
348 if (evi_list->
size() > 0) {
349 for (
const auto pot: *evi_list)
350 inference_engine->addEvidence(*pot);
355 auto p =
new Tensor< GUM_SCALAR >;
356 (*p) << working_bn->
variable(elt.first);
359 p->fillWith(elt.second);
368 if (evi_list->
size() > 0) {
369 for (
const auto pot: *evi_list)
370 inference_engine->addEvidence(*pot);
Inference by a basic (pure random) sampling algorithm of Bayesian networks drawn from a credal network.
void updateApproximationScheme(unsigned int incr=1)
void setMaxTime(double timeout) override
void setPeriodSize(Size p) override
void enableEpsilon() override
void disableMinEpsilonRate() override
Size periodSize() const override
void enableMaxTime() override
void initApproximationScheme()
bool continueApproximationScheme(double error)
void disableMaxIter() override
void setEpsilon(double eps) override
const NodeGraphPart & nodes() const final
Returns a constant reference to the dag of this Bayes Net.
virtual Size domainSize() const =0
Base class for all aGrUM's exceptions.
Class representing the minimal interface for Bayesian network with no numerical data.
virtual const DiscreteVariable & variable(NodeId id) const =0
Returns a constant reference to a variable given its node id.
virtual const Tensor< GUM_SCALAR > & cpt(NodeId varId) const =0
Returns the CPT of a variable.
Class for assigning/browsing values to tuples of discrete variables.
bool end() const
Returns true if the Instantiation reached the end.
void setFirst()
Assign the first values to the tuple of the Instantiation.
Exception: at least one argument passed to a function is not what was expected.
Generic doubly linked lists.
Size size() const noexcept
Returns the number of elements in the list.
Val & insert(const Val &val)
Inserts a new element at the end of the chained list (alias of pushBack).
void makeInference()
Starts the inference.
virtual ~CNMonteCarloSampling()
Destructor.
void _verticesSampling_(Size this_thread)
Thread samples an IBayesNet from the CredalNet.
CNMonteCarloSampling(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void _binaryRep_(std::vector< bool > &toFill, const Idx value) const
Get the binary representation of a given value.
void _threadInference_(Size this_thread)
Thread performs an inference using BNInferenceEngine.
void _insertEvidence_(Size this_thread)
Insert CredalNet evidence into a thread BNInferenceEngine.
void _mcInitApproximationScheme_()
Initialize approximation Scheme.
void _mcThreadDataCopy_()
Initialize threads data.
void _threadUpdate_(Size this_thread)
Update thread data after an IBayesNet inference.
Class template representing a Credal Network.
cluster t1_
Clusters of nodes used with dynamic networks.
bool storeBNOpt_
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
bool repetitiveInd_
True if using repetitive independence ( dynamic network only ), False otherwise.
bool storeVertices_
True if credal sets vertices are stored, False otherwise.
void dynamicExpectations_()
const CredalNet< GUM_SCALAR > * credalNet_
const CredalNet< GUM_SCALAR > & credalNet() const
cluster t0_
Clusters of nodes used with dynamic networks.
_expes_ l_expectationMax_
_expes_ l_expectationMin_
std::vector< _bnet_ * > workingSet_
std::vector< LazyPropagation< GUM_SCALAR > * > l_inferenceEngine_
_credalSets_ l_marginalSets_
std::vector< VarMod2BNsMap< GUM_SCALAR > * > l_optimalNet_
MultipleInferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
void initThreadsData_(const Size &num_threads, const bool _storeVertices_, const bool _storeBNOpt_)
const GUM_SCALAR computeEpsilon_()
bool updateThread_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
std::vector< List< const Tensor< GUM_SCALAR > * > * > workingSetE_
std::vector< std::mt19937 > generators_
_clusters_ l_clusters_
Threads clusters.
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Size Idx
Type for indexes.
Idx randomValue(const Size max=2)
Returns a random Idx between 0 and max-1, inclusive.
namespace for all credal networks entities
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
unsigned int getNumberOfThreads()
returns the max number of threads used by default when entering the next parallel region
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running