pyAgrum-nightly 2.3.1.9.dev202512261765915415__cp310-abi3-macosx_10_15_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyagrum/__init__.py +165 -0
- pyagrum/_pyagrum.so +0 -0
- pyagrum/bnmixture/BNMInference.py +268 -0
- pyagrum/bnmixture/BNMLearning.py +376 -0
- pyagrum/bnmixture/BNMixture.py +464 -0
- pyagrum/bnmixture/__init__.py +60 -0
- pyagrum/bnmixture/notebook.py +1058 -0
- pyagrum/causal/_CausalFormula.py +280 -0
- pyagrum/causal/_CausalModel.py +436 -0
- pyagrum/causal/__init__.py +81 -0
- pyagrum/causal/_causalImpact.py +356 -0
- pyagrum/causal/_dSeparation.py +598 -0
- pyagrum/causal/_doAST.py +761 -0
- pyagrum/causal/_doCalculus.py +361 -0
- pyagrum/causal/_doorCriteria.py +374 -0
- pyagrum/causal/_exceptions.py +95 -0
- pyagrum/causal/_types.py +61 -0
- pyagrum/causal/causalEffectEstimation/_CausalEffectEstimation.py +1175 -0
- pyagrum/causal/causalEffectEstimation/_IVEstimators.py +718 -0
- pyagrum/causal/causalEffectEstimation/_RCTEstimators.py +132 -0
- pyagrum/causal/causalEffectEstimation/__init__.py +46 -0
- pyagrum/causal/causalEffectEstimation/_backdoorEstimators.py +774 -0
- pyagrum/causal/causalEffectEstimation/_causalBNEstimator.py +324 -0
- pyagrum/causal/causalEffectEstimation/_frontdoorEstimators.py +396 -0
- pyagrum/causal/causalEffectEstimation/_learners.py +118 -0
- pyagrum/causal/causalEffectEstimation/_utils.py +466 -0
- pyagrum/causal/notebook.py +172 -0
- pyagrum/clg/CLG.py +658 -0
- pyagrum/clg/GaussianVariable.py +111 -0
- pyagrum/clg/SEM.py +312 -0
- pyagrum/clg/__init__.py +63 -0
- pyagrum/clg/canonicalForm.py +408 -0
- pyagrum/clg/constants.py +54 -0
- pyagrum/clg/forwardSampling.py +202 -0
- pyagrum/clg/learning.py +776 -0
- pyagrum/clg/notebook.py +480 -0
- pyagrum/clg/variableElimination.py +271 -0
- pyagrum/common.py +60 -0
- pyagrum/config.py +319 -0
- pyagrum/ctbn/CIM.py +513 -0
- pyagrum/ctbn/CTBN.py +573 -0
- pyagrum/ctbn/CTBNGenerator.py +216 -0
- pyagrum/ctbn/CTBNInference.py +459 -0
- pyagrum/ctbn/CTBNLearner.py +161 -0
- pyagrum/ctbn/SamplesStats.py +671 -0
- pyagrum/ctbn/StatsIndepTest.py +355 -0
- pyagrum/ctbn/__init__.py +79 -0
- pyagrum/ctbn/constants.py +54 -0
- pyagrum/ctbn/notebook.py +264 -0
- pyagrum/defaults.ini +199 -0
- pyagrum/deprecated.py +95 -0
- pyagrum/explain/_ComputationCausal.py +75 -0
- pyagrum/explain/_ComputationConditional.py +48 -0
- pyagrum/explain/_ComputationMarginal.py +48 -0
- pyagrum/explain/_CustomShapleyCache.py +110 -0
- pyagrum/explain/_Explainer.py +176 -0
- pyagrum/explain/_Explanation.py +70 -0
- pyagrum/explain/_FIFOCache.py +54 -0
- pyagrum/explain/_ShallCausalValues.py +204 -0
- pyagrum/explain/_ShallConditionalValues.py +155 -0
- pyagrum/explain/_ShallMarginalValues.py +155 -0
- pyagrum/explain/_ShallValues.py +296 -0
- pyagrum/explain/_ShapCausalValues.py +208 -0
- pyagrum/explain/_ShapConditionalValues.py +126 -0
- pyagrum/explain/_ShapMarginalValues.py +191 -0
- pyagrum/explain/_ShapleyValues.py +298 -0
- pyagrum/explain/__init__.py +81 -0
- pyagrum/explain/_explGeneralizedMarkovBlanket.py +152 -0
- pyagrum/explain/_explIndependenceListForPairs.py +146 -0
- pyagrum/explain/_explInformationGraph.py +264 -0
- pyagrum/explain/notebook/__init__.py +54 -0
- pyagrum/explain/notebook/_bar.py +142 -0
- pyagrum/explain/notebook/_beeswarm.py +174 -0
- pyagrum/explain/notebook/_showShapValues.py +97 -0
- pyagrum/explain/notebook/_waterfall.py +220 -0
- pyagrum/explain/shapley.py +225 -0
- pyagrum/lib/__init__.py +46 -0
- pyagrum/lib/_colors.py +390 -0
- pyagrum/lib/bn2graph.py +299 -0
- pyagrum/lib/bn2roc.py +1026 -0
- pyagrum/lib/bn2scores.py +217 -0
- pyagrum/lib/bn_vs_bn.py +605 -0
- pyagrum/lib/cn2graph.py +305 -0
- pyagrum/lib/discreteTypeProcessor.py +1102 -0
- pyagrum/lib/discretizer.py +58 -0
- pyagrum/lib/dynamicBN.py +390 -0
- pyagrum/lib/explain.py +57 -0
- pyagrum/lib/export.py +84 -0
- pyagrum/lib/id2graph.py +258 -0
- pyagrum/lib/image.py +387 -0
- pyagrum/lib/ipython.py +307 -0
- pyagrum/lib/mrf2graph.py +471 -0
- pyagrum/lib/notebook.py +1821 -0
- pyagrum/lib/proba_histogram.py +552 -0
- pyagrum/lib/utils.py +138 -0
- pyagrum/pyagrum.py +31495 -0
- pyagrum/skbn/_MBCalcul.py +242 -0
- pyagrum/skbn/__init__.py +49 -0
- pyagrum/skbn/_learningMethods.py +282 -0
- pyagrum/skbn/_utils.py +297 -0
- pyagrum/skbn/bnclassifier.py +1014 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/LICENSE.md +12 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/LICENSES/LGPL-3.0-or-later.txt +304 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/LICENSES/MIT.txt +18 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/METADATA +145 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/RECORD +107 -0
- pyagrum_nightly-2.3.1.9.dev202512261765915415.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
############################################################################
|
|
2
|
+
# This file is part of the aGrUM/pyAgrum library. #
|
|
3
|
+
# #
|
|
4
|
+
# Copyright (c) 2005-2025 by #
|
|
5
|
+
# - Pierre-Henri WUILLEMIN(_at_LIP6) #
|
|
6
|
+
# - Christophe GONZALES(_at_AMU) #
|
|
7
|
+
# #
|
|
8
|
+
# The aGrUM/pyAgrum library is free software; you can redistribute it #
|
|
9
|
+
# and/or modify it under the terms of either : #
|
|
10
|
+
# #
|
|
11
|
+
# - the GNU Lesser General Public License as published by #
|
|
12
|
+
# the Free Software Foundation, either version 3 of the License, #
|
|
13
|
+
# or (at your option) any later version, #
|
|
14
|
+
# - the MIT license (MIT), #
|
|
15
|
+
# - or both in dual license, as here. #
|
|
16
|
+
# #
|
|
17
|
+
# (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) #
|
|
18
|
+
# #
|
|
19
|
+
# This aGrUM/pyAgrum library is distributed in the hope that it will be #
|
|
20
|
+
# useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
|
|
21
|
+
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS #
|
|
22
|
+
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
|
|
23
|
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
|
|
24
|
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, #
|
|
25
|
+
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #
|
|
26
|
+
# OTHER DEALINGS IN THE SOFTWARE. #
|
|
27
|
+
# #
|
|
28
|
+
# See LICENCES for more details. #
|
|
29
|
+
# #
|
|
30
|
+
# SPDX-FileCopyrightText: Copyright 2005-2025 #
|
|
31
|
+
# - Pierre-Henri WUILLEMIN(_at_LIP6) #
|
|
32
|
+
# - Christophe GONZALES(_at_AMU) #
|
|
33
|
+
# SPDX-License-Identifier: LGPL-3.0-or-later OR MIT #
|
|
34
|
+
# #
|
|
35
|
+
# Contact : info_at_agrum_dot_org #
|
|
36
|
+
# homepage : http://agrum.gitlab.io #
|
|
37
|
+
# gitlab : https://gitlab.com/agrumery/agrum #
|
|
38
|
+
# #
|
|
39
|
+
############################################################################
|
|
40
|
+
|
|
41
|
+
from typing import Tuple
|
|
42
|
+
import pyagrum
|
|
43
|
+
import random
|
|
44
|
+
|
|
45
|
+
from pyagrum.ctbn import CIM
|
|
46
|
+
from pyagrum.ctbn import CTBN
|
|
47
|
+
from pyagrum.ctbn.constants import NodeId
|
|
48
|
+
|
|
49
|
+
"""
|
|
50
|
+
This file is used to generate random CTBNs using Prufer code
|
|
51
|
+
"""
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def directTree(graph: pyagrum.MixedGraph, root: NodeId):
  """
  Direct an undirected tree.

  Recursively turns every edge incident to ``root`` into an arc going away
  from ``root``, then directs the subtrees rooted at its former neighbours.

  Parameters
  ----------
  graph : pyagrum.MixedGraph
    A graph that contains directed/undirected arcs. Modified in place.
  root : NodeId
    Root of the tree to direct.
  """
  # Snapshot the neighbours before mutating: we erase edges while walking
  # them, and iterating a container that is being mutated is unsafe if
  # neighbours() returns a live view rather than a copy.
  for child in list(graph.neighbours(root)):
    graph.eraseEdge(child, root)
    graph.addArc(root, child)
    directTree(graph, child)
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def randomCIMs(ctbn: CTBN, valueRange: Tuple[float, float]):
  """
  Fills the ``ctbn``'s CIMs with random values in ``valueRange``.
  Note : as diagonal coefficients are the negative sum of the coefficients on the line,
  their value does not necessarily belong to ``valueRange``.

  Parameters
  ----------
  ctbn : CTBN
    The ctbn to fill.
  valueRange : Tuple[float, float]
    Range to choose values from when filling the cims.
  """

  for v in ctbn.names():
    # v_i / v_j are the "from state" / "to state" variables of v's CIM
    v_i = ctbn.CIM(v).findVar(CIM.varI(v))
    v_j = ctbn.CIM(v).findVar(CIM.varJ(v))

    # I1 iterates over the whole CIM tensor; I2 iterates over v_j only
    I1 = pyagrum.Instantiation(ctbn.CIM(v)._pot)
    I2 = pyagrum.Instantiation()
    ind = I1.pos(v_j)
    I2.add(I1.variable(ind))

    I1.setFirst()
    while not I1.end():
      # for each line of the CIM (v_i and parent context fixed), draw the
      # off-diagonal coefficients and accumulate their sum
      I2.setFirst()
      sumCIM = 0
      while not I2.end():
        I1.setVals(I2)
        if I1.val(v_i) != I1.val(v_j):
          draw = random.uniform(valueRange[0], valueRange[1])
          sumCIM += draw
          ctbn.CIM(v)._pot.set(I1, draw)
        else:
          # diagonal entry: zeroed here, set below once the line sum is known
          ctbn.CIM(v)._pot.set(I1, 0)

        I2.inc()

      # diagonal coefficient is the negative sum of the line
      tmp = pyagrum.Instantiation(I1)
      tmp.chgVal(v_j, tmp.val(v_i))
      ctbn.CIM(v)._pot.set(tmp, -sumCIM)
      # advance I1 to the next line (increments everything except v_j)
      I1.incOut(I2)
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def randomCTBN(valueRange: Tuple[float, float], n: int = 1, parMax: int = 1, modal: int = 2) -> CTBN:
  """
  Generates a random CTBN using Prufer's sequence.
  Note : as diagonal coefficients are the negative sum of the coefficients on the line in a CIM,
  their value does not necessarily belong to ``valueRange``.

  Parameters
  ----------
  valueRange : Tuple[float, float]
    Range to choose values from when filling the CIMs.
  n : int
    Number of variables.
  parMax : int
    Maximum number of parents a variable can have.
  modal : int
    Number of states a variable has (domain size).

  Returns
  -------
  CTBN
    A randomly generated ctbn.
  """
  ctbn = CTBN()
  graph = pyagrum.MixedGraph()

  # one labelized variable (and one graph node) per CTBN variable
  for i in range(n):
    name = f"V{i + 1}"
    labels = [f"v{i + 1}_{t}" for t in range(1, modal + 1)]
    var = pyagrum.LabelizedVariable(name, name, labels)
    ctbn.add(var)
    graph.addNode()

  # generating Prufer's sequence and associated tree
  if n == 2:
    if random.random() > 0.5:
      graph.addArc(0, 1)
    else:
      graph.addArc(1, 0)

  elif n > 2:
    sequence = [random.randint(0, n - 1) for _ in range(n - 2)]
    # each node appears in the sequence (degree - 1) times
    degree = {node: 1 for node in range(n)}
    for s in sequence:
      degree[s] += 1

    # decode the sequence: connect the first remaining leaf to each entry
    for s in sequence:
      for j in range(n):
        if degree[j] == 1:
          graph.addEdge(j, s)
          degree[s] -= 1
          degree[j] -= 1
          break

    # connect the two last remaining leaves.
    # NOTE(fix): use None as sentinel — 0 is a valid node id, so the previous
    # `u = 0` sentinel only produced the right edge by accident.
    u = None
    v = None
    for i in range(n):
      if degree[i] == 1:
        if u is None:
          u = i
        else:
          v = i
          break

    graph.addEdge(u, v)

    # direct it
    directTree(graph, random.randint(0, n - 1))

  # add/remove/invert arcs at random (3n mutation attempts)
  for _ in range(n * 3):
    event = random.sample(range(0, n), k=2)
    draw = random.randint(1, 3)

    if draw == 1:
      # addition, bounded by parMax
      if len(graph.parents(event[1])) < parMax:
        graph.addArc(event[0], event[1])

    elif draw == 2:
      # removal, rolled back if it disconnects the graph
      if graph.existsArc(event[0], event[1]):
        graph.eraseArc(event[0], event[1])
        if len(graph.connectedComponents()) > 1:
          graph.addArc(event[0], event[1])

    else:
      # inversion, bounded by parMax on the new child
      if graph.existsArc(event[0], event[1]):
        if len(graph.parents(event[0])) < parMax:
          graph.eraseArc(event[0], event[1])
          graph.addArc(event[1], event[0])

  # add arcs to the CTBN
  for arc in graph.arcs():
    ctbn.addArc(arc[0], arc[1])

  randomCIMs(ctbn, valueRange)

  return ctbn
|
|
@@ -0,0 +1,459 @@
|
|
|
1
|
+
############################################################################
|
|
2
|
+
# This file is part of the aGrUM/pyAgrum library. #
|
|
3
|
+
# #
|
|
4
|
+
# Copyright (c) 2005-2025 by #
|
|
5
|
+
# - Pierre-Henri WUILLEMIN(_at_LIP6) #
|
|
6
|
+
# - Christophe GONZALES(_at_AMU) #
|
|
7
|
+
# #
|
|
8
|
+
# The aGrUM/pyAgrum library is free software; you can redistribute it #
|
|
9
|
+
# and/or modify it under the terms of either : #
|
|
10
|
+
# #
|
|
11
|
+
# - the GNU Lesser General Public License as published by #
|
|
12
|
+
# the Free Software Foundation, either version 3 of the License, #
|
|
13
|
+
# or (at your option) any later version, #
|
|
14
|
+
# - the MIT license (MIT), #
|
|
15
|
+
# - or both in dual license, as here. #
|
|
16
|
+
# #
|
|
17
|
+
# (see https://agrum.gitlab.io/articles/dual-licenses-lgplv3mit.html) #
|
|
18
|
+
# #
|
|
19
|
+
# This aGrUM/pyAgrum library is distributed in the hope that it will be #
|
|
20
|
+
# useful, but WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
|
|
21
|
+
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES MERCHANTABILITY or FITNESS #
|
|
22
|
+
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
|
|
23
|
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
|
|
24
|
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, #
|
|
25
|
+
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #
|
|
26
|
+
# OTHER DEALINGS IN THE SOFTWARE. #
|
|
27
|
+
# #
|
|
28
|
+
# See LICENCES for more details. #
|
|
29
|
+
# #
|
|
30
|
+
# SPDX-FileCopyrightText: Copyright 2005-2025 #
|
|
31
|
+
# - Pierre-Henri WUILLEMIN(_at_LIP6) #
|
|
32
|
+
# - Christophe GONZALES(_at_AMU) #
|
|
33
|
+
# SPDX-License-Identifier: LGPL-3.0-or-later OR MIT #
|
|
34
|
+
# #
|
|
35
|
+
# Contact : info_at_agrum_dot_org #
|
|
36
|
+
# homepage : http://agrum.gitlab.io #
|
|
37
|
+
# gitlab : https://gitlab.com/agrumery/agrum #
|
|
38
|
+
# #
|
|
39
|
+
############################################################################
|
|
40
|
+
|
|
41
|
+
import csv
|
|
42
|
+
from concurrent.futures import ProcessPoolExecutor
|
|
43
|
+
from typing import Dict, Tuple
|
|
44
|
+
from numpy.random import default_rng, choice
|
|
45
|
+
from scipy.linalg import expm
|
|
46
|
+
|
|
47
|
+
import pyagrum
|
|
48
|
+
|
|
49
|
+
from pyagrum.ctbn import CIM
|
|
50
|
+
from pyagrum.ctbn import CTBN
|
|
51
|
+
from pyagrum.ctbn.constants import NodeId, NameOrId
|
|
52
|
+
|
|
53
|
+
"""
|
|
54
|
+
This file contains inference tools for CTBNs
|
|
55
|
+
"""
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class CTBNInference:
  """
  Base class of the CTBN inference engines. Concrete subclasses provide the
  actual computation: exact inference through amalgamation, or (forward)
  sampling inference.

  Parameters
  ----------
  model : CTBN
    The CTBN used for inference.
  """

  def __init__(self, model: CTBN):
    self._model = model

  def makeInference(self):
    # abstract: subclasses run their inference algorithm here
    raise NotImplementedError("Not yet implemented.")

  def posterior(self, name: str) -> "pyagrum.Tensor":
    # abstract: subclasses return the computed distribution of ``name``
    raise NotImplementedError("Not yet implemented.")
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class SimpleInference(CTBNInference):
  """
  Exact inference using amalgamation to compute the Intensity Matrix corresponding to the CTBN
  (not efficient for models with great number of variables)

  Parameters
  ----------
  ctbn : CTBN
    The CTBN used for simple inference.
  """

  def __init__(self, ctbn: CTBN):
    super().__init__(ctbn)
    # joint distribution over all variables, filled by makeInference()
    self._joint = None

  def setEvidence(self, evs=None):
    raise NotImplementedError("Not yet implemented.")

  def makeInference(self, t: float = 5000):
    """
    Compute exact inference at time ``t``. Distribution for initial state is uniform.

    Parameters
    ----------
    t : float
      Time to make inference calculation at.
    """
    q = CIM()
    # amalgamation: combine every variable's CIM into one joint intensity matrix
    for nod in self._model.nodes():
      q = q.amalgamate(self._model.CIM(nod))

    # matrix exponential of t*Q gives the state distribution after time t
    q.fromMatrix(expm(t * q.toMatrix()))
    # initial state distribution: uniform over the "from state" variables
    # (their names end with "i" by CIM naming convention)
    t0 = pyagrum.Tensor()
    for n in q.varNames:
      if n[-1] == "i":
        t0.add(q._pot.variable(n))
    t0.fillWith(1).normalize()

    # marginalize the starting states out of (prior * transition)
    self._joint = (t0 * q._pot).sumOut(list(t0.names))

  def posterior(self, v: NameOrId) -> "pyagrum.Tensor":
    """
    Parameters
    ----------
    v : NameOrId
      Name or id of the variable.

    Returns
    -------
    pyagrum.Tensor
      The computed distribution of variable ``v`` using exact inference.
    """
    # read the marginal of v's "to state" (varJ) variable from the joint
    vj = CIM.varJ(self._model.variable(v).name())
    return pyagrum.Tensor().add(self._model.variable(v)).fillWith(self._joint.sumIn(vj), [vj])
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
class ForwardSamplingInference(CTBNInference):
  """
  Inference using forward sampling (slow convergence).

  Notes
  -----
  Sometimes samples are called trajectories. One sample is one trajectory.
  When making inference, the last sample is stored in the class as a trajectory.
  idtraj indicates the number of samplings done.

  Parameters
  ----------
  ctbn : CTBN
    The CTBN used for sampling.

  Attributes
  ----------
  idtraj : int
    Id of the sample.
  trajectory : List[Tuple[float, str, str]]
    Contains the trajectory from the last sampling.

  Examples
  --------
  How to read a trajectory?
  A tuple ``(t, var, s)`` means that at time ``t`` the variable ``var`` transition from state ``s`` to another one.

  When storing many trajectories from different samples, a trajectory is identified by an id.
  """

  def __init__(self, ctbn: CTBN):
    self.trajectory = list()
    self.idtraj = 0
    super().__init__(ctbn)
    # one (unnormalized) posterior tensor per variable of the model
    self._posteriors = {nod: pyagrum.Tensor().add(self._model.variable(nod)) for nod in self._model.names()}

  def makeSample(self, posteriors: Dict[str, pyagrum.Tensor], timeHorizon: float = 5000, burnIn: int = 100) -> int:
    """
    Fills posteriors using forward sampling.

    Parameters
    ----------
    posteriors : Dict[str, pyagrum.Tensor]
      A dict containing a posterior for each variable of the CTBN.
    timeHorizon : float
      Duration of the sampling.
    burnIn : int
      Number of runs before starting the sampling (to ensure ergodicity).

    Returns
    -------
    int
      Number of runs.
    """

    def init(current):
      """
      Initializes posteriors and draws the initial value of each variable using a uniform distribution.

      Parameters
      ----------
      current : pyagrum.Instantiation
        An instance of the variables of the CTBN.
      """
      for nam in self._model.names():
        posteriors[nam].fillWith(0)

      # initial uniform distribution for each variable
      for nod in self._model.nodes():
        v = self._model.variable(nod)
        newval = int(choice(range(v.domainSize())))
        current.chgVal(v.name(), newval)
        posteriors[v.name()][newval] = 1

    def getNextEvent(current: pyagrum.Instantiation, indice: pyagrum.Instantiation) -> Tuple[NodeId, float]:
      """
      Chooses the next variable to change value. The variable is chosen by drawing values of all of the
      variables transition time (i.e how much time does a variable stay in the same state). Those durations
      follow an exponential distribution. The variable with the shortest transition time is chosen.

      Parameters
      ----------
      current : pyagrum.Instantiation
        An instance of the variables in the CTBN.
      indice : pyagrum.Instantiation
        An instance of the variables in the CTBN plus their from_state/to_state corresponding variables.

      Returns
      -------
      Tuple[NodeId, float]
        The nodeId and transition time of the variable with the lowest transition time,
        or ``(None, None)`` when no variable can leave its current state.
      """
      indice.setVals(current)
      # compute next variable event
      dt = None
      argmin = None
      for nod in self._model.nodes():
        v = self._model.variable(nod)
        indice.chgVal(CIM.varI(v.name()), indice.val(v))
        indice.chgVal(CIM.varJ(v.name()), indice.val(v))  # just the diagonal

        # -q[s,s] is the total transition rate out of the current state of v
        v_lambda = -self._model.CIM(nod)[indice]
        if v_lambda != 0:
          d = rand.exponential(1 / v_lambda)
          if dt is None or dt > d:
            dt = d
            argmin = nod

      return (argmin, dt)

    def sampleNextState(current: pyagrum.Instantiation, indice: pyagrum.Instantiation, nextEvt: NameOrId) -> str:
      """
      Draw the next state of the variable nextEvt, weighted by its transition rates.
      ``current`` will contain the new value of ``nextEvt``.

      Parameters
      ----------
      current : pyagrum.Instantiation
        An instantiation of the variables in the ctbn to contain the new value of nextEvt.
      indice : pyagrum.Instantiation
        An instance of the variables in the ctbn plus their from_state/to_state corresponding variables.
      nextEvt : NameOrId
        Name or id of the next variable to change state.

      Returns
      -------
      str
        Name of the variable.
      """
      v = self._model.variable(nextEvt)
      indice.chgVal(CIM.varI(v.name()), indice.val(v))
      indice.chgVal(CIM.varJ(v.name()), indice.val(v))  # just the diagonal

      # total outgoing rate; each transition probability is rate / total
      somme = -self._model.CIM(nextEvt)[indice]
      choices = []
      probas = []
      for j in range(v.domainSize()):
        if j != indice.val(v):  # not the diagonal
          choices.append(j)
          indice.chgVal(CIM.varJ(v.name()), j)
          probas.append(self._model.CIM(nextEvt)[indice] / somme)

      newval = int(choice(choices, p=probas))
      current.chgVal(v.name(), newval)
      return v.name()

    # start
    current = self._model.completeInstantiation()
    rand = default_rng()
    nb_events = 0
    indice = self._model.fullInstantiation()
    init(current)  # initialize the starting value of each variable

    # BURNIN
    while nb_events < burnIn:
      # find next variable which will change its value
      nextEvt, dt = getNextEvent(current, indice)
      if nextEvt is None:
        # NOTE(fix): no variable can ever transition from this state, so
        # looping further would hang forever (the old code did `pass` here)
        break
      # find the value of the variable which will change its value and let this variable update his value
      sampleNextState(current, indice, nextEvt)
      nb_events += 1  # an event means a variable changes its value

    # SAMPLING
    duration = 0
    for name in self._model.names():
      current_state = self._model.variable(name).label(current.val(self._model.variable(name)))
      self.trajectory.append((0, name, current_state))
    while True:
      nextEvt, dt = getNextEvent(current, indice)

      # if none of variables changes before the timeHorizon (or just doesn't change at all).
      # NOTE(fix): nextEvt must be tested FIRST — when no transition is possible
      # dt is None and `duration + dt` would raise a TypeError.
      if nextEvt is None or duration + dt > timeHorizon:
        # update everyone for last event
        for name in self._model.names():
          posteriors[name][current] += timeHorizon - duration
          current_state = self._model.variable(name).label(current.val(self._model.variable(name)))
          self.trajectory.append((timeHorizon, name, current_state))
        break  # arriving at the timeHorizon, we quit the loop

      # update posteriors with the sojourn time spent in the current state
      for name in self._model.names():
        posteriors[name][current] += dt
      duration += dt

      # add event to trajectory
      current_state = self._model.variable(nextEvt).label(current.val(self._model.variable(nextEvt)))
      self.trajectory.append((duration, self._model.name(nextEvt), current_state))

      sampleNextState(current, indice, nextEvt)
      nb_events += 1

    return nb_events

  def makeInference(self, timeHorizon: float = 5000, burnIn: int = 100) -> int:
    """
    Start a new sample and normalize resulting posteriors to get an approximation of the inference.

    Parameters
    ----------
    timeHorizon : float
      Duration of the sampling.
    burnIn : int
      Number of runs before starting the sampling (to ensure ergodicity).

    Returns
    -------
    int
      Number of runs.
    """
    self.idtraj += 1
    self.trajectory = list()
    res = self.makeSample(self._posteriors, timeHorizon, burnIn)
    for nam in self._model.names():
      self._posteriors[nam].normalize()
    return res

  def makeParallelInference(self, nbTrajectories: int = 5, timeHorizon: float = 5000, burnIn: int = 100):
    """
    Start a given number of samples concurrently and approximate the inference over all the samples.

    Parameters
    ----------
    nbTrajectories : int
      Number of sampling in parallel.
    timeHorizon : float
      Duration of the sampling.
    burnIn : int
      Number of runs before starting the sampling (to ensure ergodicity).
    """
    # local import: only this method needs an executor
    from concurrent.futures import ThreadPoolExecutor

    posteriorsList = [
      {nod: pyagrum.Tensor().add(self._model.variable(nod)) for nod in self._model.names()}
      for _ in range(nbTrajectories)
    ]

    def runMakeSample(task: int):
      return self.makeSample(posteriorsList[task], timeHorizon, burnIn)

    # NOTE(fix): the original used ProcessPoolExecutor(max_workers=1000); a local
    # closure cannot be pickled to worker processes and, even if it could, the
    # in-place mutations of posteriorsList made in the children would never reach
    # this process. Threads share memory, so the tensor updates are visible here.
    with ThreadPoolExecutor(max_workers=nbTrajectories) as executor:
      futures = [executor.submit(runMakeSample, task) for task in range(nbTrajectories)]
      for f in futures:
        f.result()  # re-raise any exception coming from a worker

    # aggregate the per-trajectory posteriors
    for nam in self._model.names():
      self._posteriors[nam].fillWith(0)
      for i in range(nbTrajectories):
        self._posteriors[nam] += posteriorsList[i][nam]
      self._posteriors[nam].normalize()

  def averageInference(self, nbTrajectories: int = 5, timeHorizon: float = 5000, burnIn: int = 100):
    """
    Start ``nbTrajectories`` sampling (sequentially) and approximate the inference over all samples.

    Parameters
    ----------
    nbTrajectories: int
      Number of samplings.
    timeHorizon : float
      Duration of the sampling.
    burnIn : int
      Number of runs before starting the sampling (to ensure ergodicity).
    """
    posteriorsList = [
      {nod: pyagrum.Tensor().add(self._model.variable(nod)) for nod in self._model.names()}
      for _ in range(nbTrajectories)
    ]

    for i in range(nbTrajectories):
      self.makeSample(posteriorsList[i], timeHorizon, burnIn)

    # aggregate the per-trajectory posteriors
    for nam in self._model.names():
      self._posteriors[nam].fillWith(0)
      for i in range(nbTrajectories):
        self._posteriors[nam] += posteriorsList[i][nam]
      self._posteriors[nam].normalize()

  def posterior(self, name: str) -> "pyagrum.Tensor":
    """
    Parameters
    ----------
    name : str
      Name of the variable.

    Returns
    -------
    pyagrum.Tensor
      The approximate inference of a given variable.
    """
    p = pyagrum.Tensor(self._posteriors[name])
    # attach the model to the returned tensor (attribute injection; presumably
    # used by notebook display helpers — confirm against callers)
    p._model = self._model
    return p

  def writeTrajectoryCSV(self, filename: str, n: int = 1, timeHorizon=10, burnIn=100):
    """
    Makes ``n`` samples using Forward Sampling and saves trajectories into a csv file.
    Storing format : {IdSample, time, Var, state}

    Parameters
    ----------
    filename : str
      Name of the file to save trajectories in.
    n : int
      Number of sampling to run.
    timeHorizon : float
      Duration of the sampling.
    burnIn : int
      Number of preliminary iterations before starting.
    """
    data = dict()
    for i in range(n):
      self.makeInference(timeHorizon=timeHorizon, burnIn=burnIn)
      data[i] = self.trajectory

    with open(filename, "w", newline="") as csvfile:
      fieldnames = ["IdSample", "time", "var", "state"]
      writer = csv.DictWriter(csvfile, fieldnames=fieldnames)

      writer.writeheader()
      for sample_id, traj in data.items():
        for tr in traj:
          writer.writerow({"IdSample": str(sample_id), "time": str(tr[0]), "var": tr[1], "state": tr[2]})
|