49 template <
typename GUM_SCALAR,
class BNInferenceEngine >
56 template <
typename GUM_SCALAR,
class BNInferenceEngine >
61 template <
typename GUM_SCALAR,
class BNInferenceEngine >
63 const Size& num_threads,
64 const bool _storeVertices_,
65 const bool _storeBNOpt_) {
83 if (_storeVertices_) {
109 generator.seed(seed);
114 template <
typename GUM_SCALAR,
class BNInferenceEngine >
118 const std::vector< GUM_SCALAR >& vertex,
119 const bool& elimRedund) {
122 std::string var_name =
workingSet_[tId]->variable(
id).name();
123 auto delim = var_name.find_first_of(
"_");
124 var_name = var_name.substr(0, delim);
126 if (
l_modal_[tId].exists(var_name)) {
130 for (
Size mod = 0; mod < vsize; mod++)
131 exp += vertex[mod] *
l_modal_[tId][var_name][mod];
150 for (
Size mod = 0; mod < vsize; mod++) {
156 std::vector< Size > key(3);
165 if (vertex[mod] > l_marginalMax_[tId][
id][mod]) {
166 l_marginalMax_[tId][id][mod] = vertex[mod];
169 if (_infE_::storeBNOpt_ && !_infE_::evidence_.exists(
id)) {
170 std::vector< Size > key(3);
175 if (l_optimalNet_[tId]->insert(key,
true)) result =
true;
177 }
else if (vertex[mod] == l_marginalMin_[tId][
id][mod]
178 || vertex[mod] == l_marginalMax_[tId][
id][mod]) {
181 if (_infE_::storeBNOpt_ && vertex[mod] == l_marginalMin_[tId][
id][mod]
182 && !_infE_::evidence_.exists(
id)) {
183 std::vector< Size > key(3);
193 std::vector< Size > key(3);
204 if (_infE_::storeVertices_ && !added && newOne) {
205 _updateThreadCredalSets_(tId,
id, vertex, elimRedund);
211 if (_infE_::storeBNOpt_ && result)
return true;
216 template <
typename GUM_SCALAR,
class BNInferenceEngine >
220 const std::vector< GUM_SCALAR >& vertex,
221 const bool& elimRedund) {
227 for (
auto it = nodeCredalSet.cbegin(), itEnd = nodeCredalSet.cend(); it != itEnd; ++it) {
230 for (
Size i = 0; i < dsize; i++) {
231 if (std::fabs(vertex[i] - (*it)[i]) > 1e-6) {
240 if (!eq || nodeCredalSet.size() == 0) {
241 nodeCredalSet.push_back(vertex);
247 if (nodeCredalSet.size() == 1)
return;
252 auto itEnd = std::remove_if(
253 nodeCredalSet.begin(),
255 [&](
const std::vector< GUM_SCALAR >& v) ->
bool {
256 for (auto jt = v.cbegin(),
258 minIt = l_marginalMin_[tId][id].cbegin(),
259 minItEnd = l_marginalMin_[tId][id].cend(),
260 maxIt = l_marginalMax_[tId][id].cbegin(),
261 maxItEnd = l_marginalMax_[tId][id].cend();
262 jt != jtEnd && minIt != minItEnd && maxIt != maxItEnd;
263 ++jt, ++minIt, ++maxIt) {
264 if ((std::fabs(*jt - *minIt) < 1e-6 || std::fabs(*jt - *maxIt) < 1e-6)
265 && std::fabs(*minIt - *maxIt) > 1e-6)
271 nodeCredalSet.erase(itEnd, nodeCredalSet.end());
274 if (!elimRedund || nodeCredalSet.size() <= 2)
return;
279 Size setSize =
Size(nodeCredalSet.size());
282 lrsWrapper.
setUpV(dsize, setSize);
284 for (
const auto& vtx: nodeCredalSet)
285 lrsWrapper.
fillV(vtx);
289 l_marginalSets_[tId][id] = lrsWrapper.
getOutput();
292 template <
typename GUM_SCALAR,
class BNInferenceEngine >
300 auto threadedExec = [
this](
const std::size_t this_thread,
301 const std::size_t nb_threads,
302 const std::vector< std::pair< Idx, Idx > >& ranges) {
306 const auto end_i = this->
threadRanges_[this_thread + 1].first;
308 const auto marginalMax_size = this->
marginalMax_.size();
311 while ((i < end_i) || (j < end_j)) {
313 for (
Idx tId = 0; tId < tsize; tId++) {
321 if (++j == domain_size) {
324 if (i < marginalMax_size) domain_size = this->
marginalMax_[i].size();
334 ? std::vector< std::pair< NodeId, Idx > >{{0, 0}, {this->
marginalMin_.size(), 0}}
367 template <
typename GUM_SCALAR,
class BNInferenceEngine >
368 inline const GUM_SCALAR
375 std::vector< GUM_SCALAR > tEps(nb_threads, 0);
378 auto threadedExec = [
this, &tEps](
const std::size_t this_thread,
379 const std::size_t nb_threads,
380 const std::vector< std::pair< Idx, Idx > >& ranges) {
381 auto& this_tEps = tEps[this_thread];
382 GUM_SCALAR delta = 0;
387 const auto end_i = this->
threadRanges_[this_thread + 1].first;
389 const auto marginalMax_size = this->
marginalMax_.size();
391 while ((i < end_i) || (j < end_j)) {
394 delta = (delta < 0) ? (-delta) : delta;
395 if (this_tEps < delta) this_tEps = delta;
399 delta = (delta < 0) ? (-delta) : delta;
400 if (this_tEps < delta) this_tEps = delta;
405 if (++j == domain_size) {
408 if (i < marginalMax_size) domain_size = this->
marginalMax_[i].size();
418 ? std::vector< std::pair< NodeId, Idx > >{{0, 0}, {this->
marginalMin_.size(), 0}}
422 GUM_SCALAR eps = tEps[0];
423 for (
const auto nb: tEps)
424 if (eps < nb) eps = nb;
472 template <
typename GUM_SCALAR,
class BNInferenceEngine >
481 for (
long i = 0; i < nsize; i++) {
484 for (
Size j = 0; j < dSize; j++) {
488 for (
Size tId = 0; tId < tsize; tId++) {
500 template <
typename GUM_SCALAR,
class BNInferenceEngine >
512 auto threadedExec = [
this, tsize](
const std::size_t this_thread,
513 const std::size_t nb_threads,
515 const std::vector< std::pair< Idx, Idx > >& ranges) {
516 for (
Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
518 for (
Size tId = 0; tId < tsize; ++tId) {
522 for (
const auto& vtx: nodeThreadCredalSet) {
534 for (
Idx work_index = 0; work_index < working_size; ++work_index) {
536 const auto nsize =
workingSet_[work_index]->size();
537 const auto real_nb_threads = std::min(nb_threads, nsize);
575 template <
typename GUM_SCALAR,
class BNInferenceEngine >
578 if (this->
modal_.empty())
return;
588 auto threadedExec = [
this](
const std::size_t this_thread,
589 const std::size_t nb_threads,
591 const std::vector< std::pair< Idx, Idx > >& ranges) {
592 for (
Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
593 std::string var_name =
workingSet_[work_index]->variable(i).name();
594 auto delim = var_name.find_first_of(
"_");
595 var_name = var_name.substr(0, delim);
597 if (!
l_modal_[work_index].exists(var_name))
continue;
603 for (
Size mod = 0; mod < vsize; mod++)
604 exp += vertex[mod] *
l_modal_[work_index][var_name][mod];
614 for (
Idx work_index = 0; work_index < working_size; ++work_index) {
615 if (!this->
l_modal_[work_index].empty()) {
617 const auto nsize =
workingSet_[work_index]->size();
618 const auto real_nb_threads = std::min(nb_threads, nsize);
629 auto threadedExec = [
this](
const std::size_t this_thread,
630 const std::size_t nb_threads,
632 const std::vector< std::pair< Idx, Idx > >& ranges) {
633 for (
Idx i = ranges[this_thread].first, end = ranges[this_thread].second; i < end; i++) {
634 std::string var_name =
workingSet_[work_index]->variable(i).name();
635 auto delim = var_name.find_first_of(
"_");
636 var_name = var_name.substr(0, delim);
638 if (!
l_modal_[work_index].exists(var_name))
continue;
642 for (
Idx tId = 0; tId < tsize; tId++) {
653 for (
Idx work_index = 0; work_index < working_size; ++work_index) {
654 if (!this->
l_modal_[work_index].empty()) {
656 const auto real_nb_threads = std::min(nb_threads, nsize);
734 template <
typename GUM_SCALAR,
class BNInferenceEngine >
736 using dBN = std::vector< bool >;
741 for (
Idx i = 0; i < nsize; i++) {
747 for (
Size j = 0; j < dSize; j++) {
749 std::vector< Size > keymin(3);
753 std::vector< Size > keymax(keymin);
758 for (
Size tId = 0; tId < tsize; tId++) {
760 const std::vector< dBN* >& tOpts =
l_optimalNet_[tId]->getBNOptsFromKey(keymin);
763 for (
Size bn = 0; bn < osize; bn++) {
769 const std::vector< dBN* >& tOpts =
l_optimalNet_[tId]->getBNOptsFromKey(keymax);
772 for (
Size bn = 0; bn < osize; bn++) {
781 template <
typename GUM_SCALAR,
class BNInferenceEngine >
787 for (
Size bn = 0; bn < tsize; bn++) {
virtual Size getNumberOfThreads() const
returns the current max number of threads used by the class containing this ThreadNumberManager
Class template representing a Credal Network.
margi oldMarginalMax_
Old upper marginals used to compute epsilon.
margi evidence_
Holds observed variables states.
bool storeBNOpt_
Iterations limit stopping rule used by some algorithms such as CNMonteCarloSampling.
margi marginalMax_
Upper marginals.
void updateCredalSets_(const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Given a node id and one of its possible vertices, update its credal set.
InferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
margi oldMarginalMin_
Old lower marginals used to compute epsilon.
bool storeVertices_
True if credal sets vertices are stored, False otherwise.
virtual void eraseAllEvidence()
removes all the evidence entered into the network
expe expectationMax_
Upper expectations, if some variables modalities were inserted.
credalSet marginalSets_
Credal sets vertices, if enabled.
const CredalNet< GUM_SCALAR > & credalNet() const
Get this credal network.
margi marginalMin_
Lower marginals.
dynExpe modal_
Variables modalities used to compute expectations.
expe expectationMin_
Lower expectations, if some variables modalities were inserted.
std::vector< std::pair< NodeId, Idx > > threadRanges_
the ranges of elements of marginalMin_ and marginalMax_ processed by each thread
VarMod2BNsMap< GUM_SCALAR > dbnOpt_
Object used to efficiently store optimal Bayes nets during inference, for some algorithms.
Class template acting as a wrapper for Lexicographic Reverse Search by David Avis.
const matrix & getOutput() const
Get the output matrix solution of the problem.
void setUpV(const Size &card, const Size &vertices)
Sets up a V-representation.
void elimRedundVrep()
V-Redundancy elimination.
void fillV(const std::vector< GUM_SCALAR > &vertex)
Creates the V-representation of a polytope by adding a vertex to the problem input _input_.
void updateMarginals_()
Fusion of threads marginals.
_margis_ l_marginalMin_
Threads lower marginals, one per thread.
_expes_ l_expectationMax_
Threads upper expectations, one per thread.
_expes_ l_expectationMin_
Threads lower expectations, one per thread.
std::vector< _bnet_ * > workingSet_
Threads IBayesNet.
std::vector< BNInferenceEngine * > l_inferenceEngine_
Threads BNInferenceEngine.
_credalSets_ l_marginalSets_
Threads vertices.
void optFusion_()
Fusion of threads optimal IBayesNet.
_modals_ l_modal_
Threads modalities.
std::vector< VarMod2BNsMap< GUM_SCALAR > * > l_optimalNet_
Threads optimal IBayesNet.
void updateOldMarginals_()
Update old marginals (from current marginals).
_margis_ l_marginalMax_
Threads upper marginals, one per thread.
MultipleInferenceEngine(const CredalNet< GUM_SCALAR > &credalNet)
Constructor.
void expFusion_()
Fusion of threads expectations.
void initThreadsData_(const Size &num_threads, const bool _storeVertices_, const bool _storeBNOpt_)
Initialize threads data.
virtual ~MultipleInferenceEngine()
Destructor.
const GUM_SCALAR computeEpsilon_()
Compute epsilon and update old marginals.
virtual void eraseAllEvidence()
Erase all inference related data to perform another one.
bool updateThread_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund=false)
Update thread information (marginals, expectations, IBayesNet, vertices) for a given node id.
std::vector< List< const Tensor< GUM_SCALAR > * > * > workingSetE_
Threads evidence.
std::vector< std::mt19937 > generators_
the generators used for computing random values
_margis_ l_evidence_
Threads evidence.
void _updateThreadCredalSets_(Size this_thread, const NodeId &id, const std::vector< GUM_SCALAR > &vertex, const bool &elimRedund)
Ask for redundancy elimination of a node credal set of a calling thread.
_clusters_ l_clusters_
Threads clusters.
unsigned int getThreadNumber()
Get the calling thread id.
std::size_t Size
In aGrUM, hashed values are unsigned long int.
Size Idx
Type for indexes.
Size NodeId
Type for node ids.
unsigned int currentRandomGeneratorValue()
returns the current generator's value
Abstract class representing CredalNet inference engines.
Abstract class representing CredalNet inference engines.
namespace for all credal networks entities
gum is the global namespace for all aGrUM entities
std::vector< std::pair< Idx, Idx > > dispatchRangeToThreads(Idx beg, Idx end, unsigned int nb_threads)
returns a vector equally splitting elements of a range among threads
static void execute(std::size_t nb_threads, FUNCTION exec_func, ARGS &&... func_args)
executes a function using several threads
static int nbRunningThreadsExecutors()
indicates how many threadExecutors are currently running