likelihood 1.5.8__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -484,12 +484,12 @@ class AutoClassifier(tf.keras.Model):
  Sets the encoder and decoder layers from another AutoClassifier instance,
  ensuring compatibility in dimensions. Only works if vae_mode is False.
 
- Parameters:
+ Parameters
  -----------
  source_model : AutoClassifier
  The source model to copy the encoder and decoder layers from.
 
- Raises:
+ Raises
  -------
  ValueError
  If the input shape or units of the source model do not match.
@@ -102,7 +102,7 @@ class GANRegressor(tf.keras.Model):
  Train the GAN model.
 
  Parameters
- --------
+ ----------
  X : array-like
  Input data.
  y : array-like
@@ -117,7 +117,7 @@ class GANRegressor(tf.keras.Model):
  Verbosity level. Default is 1.
 
  Returns
- --------
+ -------
  history : pd.DataFrame
  Training history.
  """
@@ -234,7 +234,7 @@ class GANRegressor(tf.keras.Model):
  Train the generator model.
 
  Parameters
- --------
+ ----------
  X_train : array-like
  Training data.
  y_train : array-like
@@ -249,7 +249,7 @@ class GANRegressor(tf.keras.Model):
  Number of epochs to wait before early stopping. Default is 3.
 
  Returns
- --------
+ -------
  history : pd.DataFrame
  Training history.
  """
@@ -674,6 +674,7 @@ class GetInsights:
  / (data_normalized.iloc[:, :-1].max() - data_normalized.iloc[:, :-1].min())
  - 1
  )
+ data_normalized.dropna(axis=1, inplace=True)
  radviz(data_normalized, color_column, color=self.colors)
  plt.title(title)
  plt.show()
@@ -0,0 +1,350 @@
+ import random
+ from collections import deque
+
+ import numpy as np
+ import tensorflow as tf
+ from packaging import version
+
+ if version.parse(tf.__version__) > version.parse("2.15.0"):
+ from ._autoencoders import AutoClassifier
+ else:
+ from .autoencoders import AutoClassifier
+
+
+ def print_progress_bar(iteration, total, length=30):
+ percent = f"{100 * (iteration / float(total)):.1f}"
+ filled_length = int(length * iteration // total)
+ bar = "█" * filled_length + "-" * (length - filled_length)
+ print(f"\rProgress: |{bar}| {percent}% Complete", end="\r")
+ if iteration == total:
+ print()
+
+
+ class Env:
+ def __init__(self, model, maxlen=100, name="likenasium"):
+ """
+ Initialize the environment with a model.
+
+ Parameters
+ ----------
+ model : Any
+ Model with `.predict()` method (e.g., Keras model).
+ maxlen : int
+ Maximum length of deque. By default it is set to `100`.
+ name : str
+ The name of the environment. By default it is set to `likenasium`.
+ """
+ self.model = model
+ self.maxlen = maxlen
+ self.transitions = deque(
+ maxlen=self.maxlen
+ ) # Stores (state, action, reward, next_action, done)
+ self.current_state = None
+ self.current_step = 0
+ self.done = False
+
+ def step(self, state, action, verbose=0):
+ """
+ Perform an environment step with the given action.
+
+ Parameters
+ ----------
+ state : `np.ndarray`
+ Current state to process (input to the model).
+ action : int
+ Expected action to process.
+
+ Returns
+ -------
+ tuple: (current_state, action_pred, reward, next_action, done)
+ """
+ if self.done:
+ return None, None, 0, None, True
+
+ # Process action through model
+ model_output = self.model.predict(state.reshape((1, -1)), verbose=verbose)
+ action_pred = np.argmax(model_output, axis=1)[0]
+ model_output[:, action_pred] = 0.0
+ next_action = np.max(model_output, axis=1)[0] # Second most probable action
+
+ # Calculate reward (1 if correct prediction, 0 otherwise)
+ reward = 1 if action_pred == action else 0
+
+ # Update current state
+ self.current_state = state
+ self.current_step += 1
+
+ # Add transition to history
+ if self.current_step <= self.maxlen:
+ self.transitions.append(
+ (
+ self.current_state, # Previous state
+ action_pred, # Current action
+ reward, # Reward
+ next_action, # Next action
+ self.done, # Done flag
+ )
+ )
+ return self.current_state, action_pred, reward, next_action, self.done
+
+ def reset(self):
+ """Reset the environment to initial state."""
+ self.current_state = None
+ self.current_step = 0
+ self.done = False
+ self.transitions = deque(maxlen=self.maxlen)
+ return self.current_state
+
+ def get_transitions(self):
+ """Get all stored transitions."""
+ return self.transitions
+
+
+ class AutoQL:
+ """
+ AutoQL: A reinforcement learning agent using Q-learning with Epsilon-greedy policy.
+
+ This class implements a Q-learning agent with:
+ - Epsilon-greedy policy for exploration
+ - Replay buffer for experience replay
+ - Automatic model version handling for TensorFlow
+ """
+
+ def __init__(
+ self,
+ env,
+ model,
+ maxlen=2000,
+ ):
+ """Initialize AutoQL agent
+
+ Parameters
+ ----------
+ env : Any
+ The environment to interact with
+ model : tf.keras.Model
+ The Q-network model
+ """
+
+ self.env = env
+ self.model = model
+ self.maxlen = maxlen
+ self.replay_buffer = deque(maxlen=self.maxlen)
+
+ def epsilon_greedy_policy(self, state, action, epsilon=0):
+ """
+ Epsilon-greedy policy for action selection
+
+ Parameters
+ ----------
+ state : `np.ndarray`
+ Current state.
+ action : int
+ Expected action to process.
+ epsilon : float
+ Exploration probability. By default it is set to `0`
+
+ Returns
+ -------
+ tuple: (state, action, reward, next_action, done)
+ """
+ current_state, value, reward, next_action, done = self.env.step(state, action)
+
+ if np.random.rand() > epsilon:
+ state = np.asarray(state).astype(np.float32)
+ return current_state, value, reward, next_action, done
+ step_ = random.sample(self.env.get_transitions(), 1)
+ _state, greedy_action, _reward, _next_action, _done = zip(*step_)
+
+ return _state[0], greedy_action[0], _reward[0], _next_action[0], _done[0]
+
+ def play_one_step(self, state, action, epsilon):
+ """
+ Perform one step in the environment and add experience to buffer
+
+ Parameters
+ ----------
+ state : `np.ndarray`
+ Current state
+ action : int
+ Expected action to process.
+
+ epsilon : float
+ Exploration probability.
+
+ Returns
+ -------
+ tuple: (state, action, reward, next_action, done)
+ """
+ current_state, greedy_action, reward, next_action, done = self.epsilon_greedy_policy(
+ state, action, epsilon
+ )
+
+ done = 1 if done else 0
+
+ # Add experience to replay buffer
+ self.replay_buffer.append(
+ (
+ current_state, # Previous state
+ greedy_action, # Current action
+ reward, # Reward
+ next_action, # Next action
+ done, # Done flag
+ )
+ )
+
+ return current_state, greedy_action, reward, next_action, done
+
+ @tf.function
+ def _training_step(self):
+ """
+ Perform one training step using experience replay
+
+ Returns
+ -------
+ float: Training loss
+ """
+
+ batch_ = random.sample(self.replay_buffer, self.batch_size)
+ states, actions, rewards, next_actions, dones = zip(*batch_)
+ states = np.array(states).reshape(self.batch_size, -1)
+ actions = np.array(actions).reshape(
+ self.batch_size,
+ )
+ rewards = np.array(rewards).reshape(
+ self.batch_size,
+ )
+ max_next_Q_values = np.array(next_actions).reshape(self.batch_size, -1)
+ dones = np.array(dones).reshape(
+ self.batch_size,
+ )
+ target_Q_values = rewards + (1 - dones) * self.gamma * max_next_Q_values
+
+ actions = tf.convert_to_tensor(actions, dtype=tf.int32)
+ states = tf.convert_to_tensor(states, dtype=tf.float32)
+ target_Q_values = tf.convert_to_tensor(target_Q_values, dtype=tf.float32)
+
+ with tf.GradientTape() as tape:
+ all_Q_values = self.model(states)
+ indices = tf.stack([tf.range(tf.shape(actions)[0]), actions], axis=1)
+ Q_values = tf.gather_nd(all_Q_values, indices)
+ loss = tf.reduce_mean(self.loss_fn(target_Q_values, Q_values))
+ grads = tape.gradient(loss, self.model.trainable_variables)
+ self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
+ return loss
+
+ def train(
+ self,
+ x_data,
+ y_data,
+ optimizer="adam",
+ loss_fn="mse",
+ num_episodes=50,
+ num_steps=100,
+ gamma=0.7,
+ batch_size=32,
+ patience=10,
+ alpha=0.01,
+ ):
+ """Train the agent for a fixed number of episodes
+
+ Parameters
+ ----------
+ optimizer : str
+ The optimizer for training (e.g., `sgd`). By default it is set to `adam`.
+ loss_fn : str
+ The loss function. By default it is set to `mse`.
+ num_episodes : int
+ Total number of episodes to train. By default it is set to `50`.
+ num_steps : int
+ Steps per episode. By default it is set to `100`. If `num_steps` is less than `self.env.maxlen`, then the second will be chosen.
+ gamma : float
+ Discount factor. By default it is set to `0.7`.
+ batch_size : int
+ Size of training batches. By default it is set to `32`.
+ patience : int
+ How many episodes to wait for improvement.
+ alpha : float
+ Trade-off factor between loss and reward.
+ """
+ rewards = []
+ self.best_weights = None
+ self.best_loss = float("inf")
+
+ optimizers = {
+ "sgd": tf.keras.optimizers.SGD(),
+ "adam": tf.keras.optimizers.Adam(),
+ "adamw": tf.keras.optimizers.AdamW(),
+ "adadelta": tf.keras.optimizers.Adadelta(),
+ "rmsprop": tf.keras.optimizers.RMSprop(),
+ }
+ self.optimizer = optimizers[optimizer]
+ losses = {
+ "mse": tf.keras.losses.MeanSquaredError(),
+ "mae": tf.keras.losses.MeanAbsoluteError(),
+ "mape": tf.keras.losses.MeanAbsolutePercentageError(),
+ }
+ self.loss_fn = losses[loss_fn]
+ self.num_episodes = num_episodes
+ self.num_steps = num_steps if num_steps >= self.env.maxlen else self.env.maxlen
+ self.gamma = gamma
+ self.batch_size = batch_size
+ loss = float("inf")
+ no_improve_count = 0
+ best_combined_metric = float("inf")
+
+ for episode in range(self.num_episodes):
+ print_progress_bar(episode + 1, self.num_episodes)
+ self.env.reset()
+ sum_rewards = 0
+ epsilon = max(1 - episode / (self.num_episodes * 0.8), 0.01)
+
+ for step in range(self.num_steps):
+ state, action, reward, next_action, done = self.play_one_step(
+ x_data[step], y_data[step], epsilon
+ )
+ sum_rewards += reward if isinstance(reward, int) else reward[0]
+
+ # Train if buffer has enough samples
+ if len(self.replay_buffer) > self.batch_size:
+ loss = self._training_step()
+
+ if done:
+ break
+
+ combined_metric = loss - alpha * sum_rewards
+
+ if combined_metric < best_combined_metric:
+ best_combined_metric = combined_metric
+ self.best_weights = self.model.get_weights()
+ self.best_loss = loss
+ no_improve_count = 0 # Reset counter on improvement
+ else:
+ no_improve_count += 1
+
+ rewards.append(sum_rewards)
+
+ # Logging
+ if episode % (self.num_episodes // 10) == 0:
+ print(
+ f"Episode: {episode}, Steps: {step+1}, Epsilon: {epsilon:.3f}, Loss: {loss:.2e}, Reward: {sum_rewards}, No Improve Count: {no_improve_count}"
+ )
+
+ # Early stopping condition
+ if no_improve_count >= patience:
+ print(
+ f"Early stopping at episode {episode} due to no improvement in {patience} episodes."
+ )
+ break
+
+ # Save best model
+ self.model.set_weights(self.best_weights)
+
+ def __str__(self):
+ return (
+ f"AutoQL (Env: {self.env.name}, Episodes: {self.num_episodes}, Steps: {self.num_steps})"
+ )
+
+
+ if __name__ == "__main__":
+ pass
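
The new module above (shipped as likelihood/models/deep/rl.py, per the RECORD further down) adds an Env wrapper that rewards correct model predictions and an AutoQL agent that trains a Q-network from a replay buffer with an epsilon-greedy policy. The following is a minimal usage sketch, not part of the released package: it assumes the module is importable as likelihood.models.deep.rl, and the toy data and tiny Keras Q-network are purely illustrative.

import numpy as np
import tensorflow as tf

from likelihood.models.deep.rl import AutoQL, Env  # assumed import path

# Toy data: 200 states with 4 features each, and 3 possible actions (classes).
x_data = np.random.rand(200, 4).astype(np.float32)
y_data = np.random.randint(0, 3, size=200)

# A small Q-network mapping a state to one Q-value per action.
q_network = tf.keras.Sequential(
    [
        tf.keras.layers.Input(shape=(4,)),
        tf.keras.layers.Dense(16, activation="relu"),
        tf.keras.layers.Dense(3),
    ]
)

env = Env(q_network, maxlen=100)  # reward is 1 when the predicted action matches y
agent = AutoQL(env, q_network, maxlen=2000)
agent.train(x_data, y_data, optimizer="adam", loss_fn="mse", num_episodes=20, gamma=0.7)
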
@@ -4,11 +4,15 @@ from typing import Dict, List, Tuple, Union
 
  import numpy as np
  import pandas as pd
+ from packaging import version
  from pandas.core.frame import DataFrame
 
  from likelihood.tools import DataScaler, FeatureSelection, OneHotEncoder, cdf, check_nan_inf
 
- warnings.simplefilter("ignore", np.RankWarning)
+ if version.parse(np.__version__) < version.parse("2.0.0"):
+ filter = np.RankWarning
+ else:
+ filter = np.exceptions.RankWarning
 
 
  # --------------------------------------------------------------------------------------------------------------------------------------
@@ -128,14 +132,15 @@ class SimulationEngine(FeatureSelection):
  )
  poly = kwargs.get("poly", 9)
  plot = kwargs.get("plot", False)
+ bandwidth = kwargs.get("bandwidth", 1.5)
  if not x[1]:
  media = self.df[key].mean()
  standard_deviation = self.df[key].std()
- lower_limit = media - 1.5 * standard_deviation
- upper_limit = media + 1.5 * standard_deviation
+ lower_limit = media - bandwidth * standard_deviation
+ upper_limit = media + bandwidth * standard_deviation
  if plot:
  print(f"Cumulative Distribution Function ({key})")
- f, cdf_, ox = cdf(x[0].flatten(), poly=poly, plot=plot)
+ f, _, ox = cdf(x[0].flatten(), poly=poly, plot=plot)
  else:
  f, ox = None, None
  least_frequent_category, most_frequent_category = categories_by_quartile(
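
The hunk above replaces the hard-coded 1.5 multiplier with a new bandwidth keyword argument (default 1.5) that widens or narrows the interval around the column mean used as simulation limits. A small illustrative sketch of that arithmetic, separate from the package code and using made-up numbers:

import pandas as pd

prices = pd.Series([10.0, 12.5, 9.8, 11.2, 10.7])
bandwidth = 1.5  # the new keyword's default value, per the diff
media = prices.mean()
standard_deviation = prices.std()
lower_limit = media - bandwidth * standard_deviation  # lower simulation bound
upper_limit = media + bandwidth * standard_deviation  # upper simulation bound
print(lower_limit, upper_limit)
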
likelihood/tools/tools.py CHANGED
@@ -8,10 +8,15 @@ import matplotlib.pyplot as plt
  import numpy as np
  import pandas as pd
  import yaml
+ from packaging import version
  from pandas.core.frame import DataFrame
 
- # Suppress RankWarning
- warnings.simplefilter("ignore", np.RankWarning)
+ if version.parse(np.__version__) < version.parse("2.0.0"):
+ filter = np.RankWarning
+ else:
+ filter = np.exceptions.RankWarning
+
+ warnings.simplefilter("ignore", filter)
 
  # -------------------------------------------------------------------------
 
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: likelihood
- Version: 1.5.8
+ Version: 2.0.0
  Summary: A package that performs the maximum likelihood algorithm.
  Home-page: https://github.com/jzsmoreno/likelihood/
  Author: J. A. Moreno-Guerra
@@ -20,9 +20,10 @@ Requires-Dist: pydocstyle>=6.3.0
  Requires-Dist: flake8>=6.0.0
  Requires-Dist: isort>=5.12.0
  Requires-Dist: mypy>=1.4.1
- Requires-Dist: numpy<2.0.0
+ Requires-Dist: numpy<3.0.0,>=1.26.4
  Requires-Dist: pydot==2.0.0
  Requires-Dist: matplotlib
+ Requires-Dist: packaging
  Requires-Dist: graphviz
  Requires-Dist: seaborn
  Requires-Dist: pyyaml
@@ -32,7 +33,7 @@ Requires-Dist: tqdm
  Provides-Extra: full
  Requires-Dist: networkx; extra == "full"
  Requires-Dist: pyvis; extra == "full"
- Requires-Dist: tensorflow==2.15.0; extra == "full"
+ Requires-Dist: tensorflow>=2.15.0; extra == "full"
  Requires-Dist: keras-tuner; extra == "full"
  Requires-Dist: scikit-learn; extra == "full"
  Dynamic: author
@@ -0,0 +1,30 @@
+ likelihood/__init__.py,sha256=5C0hapdsk85XZhN_rssRAEFpkRRuKNtj6cyRbqD2_gM,994
+ likelihood/main.py,sha256=fcCkGOOWKjfvw2tLVqjuKPV8t0rVCIT9FlbYcOv4EYo,7974
+ likelihood/graph/__init__.py,sha256=vUY4pKlnm3eSVTXd2d-5JDPawhqGNRIKRhaHIobsNws,188
+ likelihood/graph/_nn.py,sha256=Sh7dRz8QSI08Ydfw9e--uCxc4KMtHUsCz_-C-loXklQ,13883
+ likelihood/graph/graph.py,sha256=bLrNMvIh7GOTdPTwnNss8oPZ7cbSHQScAsH_ttmVUK0,3294
+ likelihood/graph/nn.py,sha256=uxCxGt1suKmThmEjFope2ew93-WlgvGhgr6RVCHwzhM,11420
+ likelihood/models/__init__.py,sha256=e6nB4w47w0Q9DrAFeP3OcUgcoHOtf7Il4mBhgf4AARg,52
+ likelihood/models/hmm.py,sha256=0s0gFySH1u4NjRaZDxiZ8oeTaFhFrw1x0GJxwy3dFrA,6253
+ likelihood/models/regression.py,sha256=9cakyGlJCEO6WfpoKLh3GxdXQeQp7cUvJIkQ5odT0TA,9404
+ likelihood/models/simulation.py,sha256=xsl4mJ2qFCuZR_B9LfQcLjV6OtONU1zyESX3CCUfOiw,8619
+ likelihood/models/utils.py,sha256=dvigPi_hxcs5ntfHr7Y1JvP5ULtMW3kkN0nJpS4orE8,1319
+ likelihood/models/deep/__init__.py,sha256=I55FciI0BfljYdhW2OGNqcpYV57FhPZETZX7Y1y9GVQ,303
+ likelihood/models/deep/_autoencoders.py,sha256=CeD79YzU7DdPd92wUNG_EtPVQOBgsgYoC4uS2JF3b6o,30939
+ likelihood/models/deep/_predictor.py,sha256=XI4QfVM7PS_60zYtmi-V8UzNDrASFiDMVPmV17BB8lM,27984
+ likelihood/models/deep/autoencoders.py,sha256=muUBH9BclOK8ViI7PijyMOBBLVox6uwuIabyJvpU5qw,30729
+ likelihood/models/deep/gan.py,sha256=rTnaLmIPjsKg6_0B8JZOVwPxdx59rHmqvzDitdJMCQ4,10924
+ likelihood/models/deep/predictor.py,sha256=q5tPaAbF7s5XIcxVr6fyHTQdZa9tlixO9vb9a9Cw0wM,27831
+ likelihood/models/deep/rl.py,sha256=9dhhnVTIETi9zvVeyOXYo1hl-LQJezmv0rgsUq11Qwc,11611
+ likelihood/tools/__init__.py,sha256=N1IhMDzacsGQT2MIYBMBC0zTxes78vC_0gGrwkuPgmg,78
+ likelihood/tools/cat_embed.py,sha256=SJ7o1vbrNYp21fLLcjRnWpUDcz1nVSe8TmMvsLIz5CI,7346
+ likelihood/tools/figures.py,sha256=waF0NHIMrctCmaLhcuz5DMcXyRKynmn6aG0XITYCTLc,10940
+ likelihood/tools/impute.py,sha256=n87Tv-xLUAdPl7BQLFcLWSsXBZbXksahyCayJWMydXc,9485
+ likelihood/tools/models_tools.py,sha256=c3-vac-1MYSarYDtfR6XfVC7X_WY9auS7y2_3Z973IQ,8875
+ likelihood/tools/numeric_tools.py,sha256=Hwf-lbqROqPPZ9N7eVzKIDyZxFGQdP53isWxPqpG0eo,12254
+ likelihood/tools/tools.py,sha256=GKZsqjyO5tGXWGSfn3jlQBTjRlmBv2byfvpu-QclUx0,42188
+ likelihood-2.0.0.dist-info/licenses/LICENSE,sha256=XWHWt9egYEUHGPTnlcZfJKLPmysacOwdiLj_-J7Z9ew,1066
+ likelihood-2.0.0.dist-info/METADATA,sha256=Ziysy1MQuW77OHHd1UzMtlfeUT9wsdgCl6rxW3uLBEE,2917
+ likelihood-2.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ likelihood-2.0.0.dist-info/top_level.txt,sha256=KDiBLr870YTxqLFqObTOSrTK10uw8dFsITSNLlte3PA,11
+ likelihood-2.0.0.dist-info/RECORD,,
@@ -1,26 +0,0 @@
- likelihood/__init__.py,sha256=5C0hapdsk85XZhN_rssRAEFpkRRuKNtj6cyRbqD2_gM,994
- likelihood/main.py,sha256=fcCkGOOWKjfvw2tLVqjuKPV8t0rVCIT9FlbYcOv4EYo,7974
- likelihood/graph/__init__.py,sha256=6TuFDfmXTwpLyHl7_KqBfdzW6zqHjGzIFvymjFPlvjI,21
- likelihood/graph/graph.py,sha256=bLrNMvIh7GOTdPTwnNss8oPZ7cbSHQScAsH_ttmVUK0,3294
- likelihood/graph/nn.py,sha256=uxCxGt1suKmThmEjFope2ew93-WlgvGhgr6RVCHwzhM,11420
- likelihood/models/__init__.py,sha256=e6nB4w47w0Q9DrAFeP3OcUgcoHOtf7Il4mBhgf4AARg,52
- likelihood/models/hmm.py,sha256=0s0gFySH1u4NjRaZDxiZ8oeTaFhFrw1x0GJxwy3dFrA,6253
- likelihood/models/regression.py,sha256=9cakyGlJCEO6WfpoKLh3GxdXQeQp7cUvJIkQ5odT0TA,9404
- likelihood/models/simulation.py,sha256=6OD2IXAnbctxtOzUJ2b9vKW7_tdGs4dQYmQQShqsioA,8443
- likelihood/models/utils.py,sha256=dvigPi_hxcs5ntfHr7Y1JvP5ULtMW3kkN0nJpS4orE8,1319
- likelihood/models/deep/__init__.py,sha256=UV_VYhySvrNnB4a0VXYM4wK3KKF7ytjLFFfwvnaZWaA,82
- likelihood/models/deep/autoencoders.py,sha256=02sgVTB-78DNUndyrzFGoiNZAY87KF953C-bdB2Dj3I,30731
- likelihood/models/deep/gan.py,sha256=prLgKEoJu6NdvT_ICfn7rBdjppga2LlvDRsTjVA8Ug0,10922
- likelihood/models/deep/predictor.py,sha256=Q9-PsgcViTDXm52h67Qdjd3HbjpLlXyAPxSqioUvgiA,27778
- likelihood/tools/__init__.py,sha256=N1IhMDzacsGQT2MIYBMBC0zTxes78vC_0gGrwkuPgmg,78
- likelihood/tools/cat_embed.py,sha256=SJ7o1vbrNYp21fLLcjRnWpUDcz1nVSe8TmMvsLIz5CI,7346
- likelihood/tools/figures.py,sha256=waF0NHIMrctCmaLhcuz5DMcXyRKynmn6aG0XITYCTLc,10940
- likelihood/tools/impute.py,sha256=n87Tv-xLUAdPl7BQLFcLWSsXBZbXksahyCayJWMydXc,9485
- likelihood/tools/models_tools.py,sha256=c3-vac-1MYSarYDtfR6XfVC7X_WY9auS7y2_3Z973IQ,8875
- likelihood/tools/numeric_tools.py,sha256=Hwf-lbqROqPPZ9N7eVzKIDyZxFGQdP53isWxPqpG0eo,12254
- likelihood/tools/tools.py,sha256=lk9BIskjUKYQ1XVwARm9jAjHuLQ4UO68aZY8oxkzk5c,42056
- likelihood-1.5.8.dist-info/licenses/LICENSE,sha256=XWHWt9egYEUHGPTnlcZfJKLPmysacOwdiLj_-J7Z9ew,1066
- likelihood-1.5.8.dist-info/METADATA,sha256=RmHunm_vrHb6AbiVasPO4J3GogK7U2lEpmwqVr8QU0E,2883
- likelihood-1.5.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- likelihood-1.5.8.dist-info/top_level.txt,sha256=KDiBLr870YTxqLFqObTOSrTK10uw8dFsITSNLlte3PA,11
- likelihood-1.5.8.dist-info/RECORD,,