likelihood 2.2.0.dev1__cp312-cp312-musllinux_1_2_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. likelihood/VERSION +1 -0
  2. likelihood/__init__.py +20 -0
  3. likelihood/graph/__init__.py +9 -0
  4. likelihood/graph/_nn.py +283 -0
  5. likelihood/graph/graph.py +86 -0
  6. likelihood/graph/nn.py +329 -0
  7. likelihood/main.py +273 -0
  8. likelihood/models/__init__.py +3 -0
  9. likelihood/models/deep/__init__.py +13 -0
  10. likelihood/models/deep/_autoencoders.py +896 -0
  11. likelihood/models/deep/_predictor.py +809 -0
  12. likelihood/models/deep/autoencoders.py +903 -0
  13. likelihood/models/deep/bandit.py +97 -0
  14. likelihood/models/deep/gan.py +313 -0
  15. likelihood/models/deep/predictor.py +805 -0
  16. likelihood/models/deep/rl.py +345 -0
  17. likelihood/models/environments.py +202 -0
  18. likelihood/models/hmm.py +163 -0
  19. likelihood/models/regression.py +451 -0
  20. likelihood/models/simulation.py +213 -0
  21. likelihood/models/utils.py +87 -0
  22. likelihood/pipes.py +382 -0
  23. likelihood/rust_py_integration.cpython-312-x86_64-linux-musl.so +0 -0
  24. likelihood/tools/__init__.py +4 -0
  25. likelihood/tools/cat_embed.py +212 -0
  26. likelihood/tools/figures.py +348 -0
  27. likelihood/tools/impute.py +278 -0
  28. likelihood/tools/models_tools.py +866 -0
  29. likelihood/tools/numeric_tools.py +390 -0
  30. likelihood/tools/reports.py +375 -0
  31. likelihood/tools/tools.py +1336 -0
  32. likelihood-2.2.0.dev1.dist-info/METADATA +68 -0
  33. likelihood-2.2.0.dev1.dist-info/RECORD +39 -0
  34. likelihood-2.2.0.dev1.dist-info/WHEEL +5 -0
  35. likelihood-2.2.0.dev1.dist-info/licenses/LICENSE +21 -0
  36. likelihood-2.2.0.dev1.dist-info/sboms/auditwheel.cdx.json +1 -0
  37. likelihood-2.2.0.dev1.dist-info/top_level.txt +7 -0
  38. likelihood.libs/libgcc_s-0cd532bd.so.1 +0 -0
  39. src/lib.rs +12 -0
@@ -0,0 +1,809 @@
1
+ import random
2
+ import warnings
3
+ from typing import List
4
+
5
+ import matplotlib
6
+ import matplotlib.colors as mcolors
7
+ import matplotlib.pyplot as plt
8
+ import networkx as nx
9
+ import numpy as np
10
+ import pandas as pd
11
+ import tensorflow as tf
12
+ from IPython.display import HTML, display
13
+ from matplotlib import cm
14
+ from matplotlib.colors import Normalize
15
+ from pandas.plotting import radviz
16
+ from sklearn.manifold import TSNE
17
+ from tensorflow.keras.layers import InputLayer
18
+
19
+ from likelihood.models.deep._autoencoders import AutoClassifier, sampling
20
+
21
+
22
class GetInsights:
    """
    Analyze a trained neural network model: weight heatmaps, layer graphs,
    t-SNE projections of the latent space, and per-class feature statistics.

    Parameters
    ----------
    model : `AutoClassifier`
        The trained model to analyze.
    inputs : `np.ndarray`
        The input data for analysis.
    """

    def __init__(self, model: AutoClassifier, inputs: np.ndarray) -> None:
        """
        Initializes the GetInsights class.

        Parameters
        ----------
        model : `AutoClassifier`
            The trained model to analyze.
        inputs : `np.ndarray`
            The input data for analysis.
        """
        self.model = model
        self.inputs = inputs

        def first_weighted_layer(net):
            # Skip a leading InputLayer (if present) so we always land on the
            # first layer that actually carries weights.
            head = net.layers[0]
            return net.layers[1] if isinstance(head, InputLayer) else head

        self.encoder_layer = first_weighted_layer(self.model._encoder)
        self.decoder_layer = first_weighted_layer(self.model._decoder)

        # Kernel matrices of the first encoder/decoder layers.
        self.encoder_weights = self.encoder_layer.get_weights()[0]
        self.decoder_weights = self.decoder_layer.get_weights()[0]

        # Shuffled color names used consistently across the plots.
        self.sorted_names = self._generate_sorted_color_names()
65
+ def _generate_sorted_color_names(self) -> list:
66
+ """
67
+ Generate sorted color names based on their HSV values.
68
+
69
+ Parameters
70
+ ----------
71
+ `None`
72
+
73
+ Returns
74
+ -------
75
+ `list` : Sorted color names.
76
+ """
77
+ colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)
78
+ by_hsv = sorted(
79
+ (tuple(mcolors.rgb_to_hsv(mcolors.to_rgba(color)[:3])), name)
80
+ for name, color in colors.items()
81
+ )
82
+ sorted_names = [name for hsv, name in by_hsv if hsv[1] > 0.4 and hsv[2] >= 0.4]
83
+ random.shuffle(sorted_names)
84
+ return sorted_names
85
+
86
    def render_html_report(
        self,
        frac: float = 0.2,
        top_k: int = 5,
        threshold_factor: float = 1.0,
        max_rows: int = 5,
        **kwargs,
    ) -> None:
        """
        Generate and display an embedded HTML report in a Jupyter Notebook cell.

        Sections, in order: prediction analysis (weights, reconstruction,
        t-SNE, RadViz), encoder/decoder layer graphs (skipped for VAE
        encoders), classifier layer graphs, and a per-class statistics table.

        Parameters
        ----------
        frac : `float`, optional
            Fraction of input rows used by `predictor_analyzer` (default is `0.2`).
        top_k : `int`, optional
            Number of strongest edges highlighted per layer graph (default is `5`).
        threshold_factor : `float`, optional
            Multiplier on the mean absolute weight deciding which edges are
            drawn (default is `1.0`).
        max_rows : `int`, optional
            Maximum number of statistic rows displayed; `None` or a
            non-positive value shows the full table (default is `5`).
        **kwargs : `dict`, optional
            Forwarded to `predictor_analyzer` (e.g. ``y_labels``).

        Returns
        -------
        `None`
        """
        display(HTML("<h2 style='margin-top:20px;'>📊 Predictor Analysis</h2>"))
        display(
            HTML(
                "<p>This section visualizes how the model predicts the data. "
                "You will see original inputs, reconstructed outputs, and analyses such as t-SNE "
                "that reduce dimensionality to visualize latent space clustering.</p>"
            )
        )
        # Runs the full analysis pipeline and keeps its statistics table for
        # the last section of the report.
        stats_df = self.predictor_analyzer(frac=frac, **kwargs)

        display(HTML("<h2 style='margin-top:30px;'>🔁 Encoder-Decoder Graph</h2>"))
        display(
            HTML(
                "<p>This visualization displays the connections between layers in the encoder and decoder. "
                "Edges with the strongest weights are highlighted to emphasize influential features "
                "in the model's transformation.</p>"
            )
        )
        # VAE encoders (name prefixed "vae") have a two-output head that the
        # graph visualization does not support, so they are skipped.
        if not self.model._encoder.name.startswith("vae"):
            self.viz_encoder_decoder_graphs(threshold_factor=threshold_factor, top_k=top_k)

        display(HTML("<h2 style='margin-top:30px;'>🧠 Classifier Layer Graphs</h2>"))
        display(
            HTML(
                "<p>This visualization shows how features propagate through each dense layer in the classifier. "
                "Only the strongest weighted connections are shown to highlight influential paths through the network.</p>"
            )
        )
        self.viz_classifier_graphs(threshold_factor=threshold_factor, top_k=top_k)

        display(HTML("<h2 style='margin-top:30px;'>📈 Statistical Summary</h2>"))
        display(
            HTML(
                "<p>This table summarizes feature statistics grouped by predicted classes, "
                "including means, standard deviations, and modes, providing insight into "
                "feature distributions across different classes.</p>"
            )
        )

        # Truncate the statistics table unless the caller asked for all rows.
        if max_rows is not None and max_rows > 0:
            stats_to_display = stats_df.head(max_rows)
        else:
            stats_to_display = stats_df

        display(
            stats_to_display.style.set_table_attributes(
                "style='display:inline;border-collapse:collapse;'"
            )
            .set_caption("Feature Summary per Class")
            .set_properties(
                **{
                    "border": "1px solid #ddd",
                    "padding": "8px",
                    "text-align": "center",
                }
            )
        )

        display(
            HTML(
                "<p style='color: gray; margin-top:30px;'>Report generated with "
                "<code>GetInsights</code> class. For detailed customization, extend "
                "<code>render_html_report</code>.</p>"
            )
        )
163
+ def viz_classifier_graphs(self, threshold_factor=1.0, top_k=5, save_path=None):
164
+ """
165
+ Visualize all Dense layers in self.model.classifier as a single directed graph,
166
+ connecting each Dense layer to the next.
167
+ """
168
+
169
+ def get_top_k_edges(weights, src_prefix, dst_prefix, k):
170
+ flat_weights = np.abs(weights.flatten())
171
+ indices = np.argpartition(flat_weights, -k)[-k:]
172
+ top_k_flat_indices = indices[np.argsort(-flat_weights[indices])]
173
+ top_k_edges = []
174
+
175
+ for flat_index in top_k_flat_indices:
176
+ i, j = np.unravel_index(flat_index, weights.shape)
177
+ top_k_edges.append((f"{src_prefix}_{i}", f"{dst_prefix}_{j}", weights[i, j]))
178
+ return top_k_edges
179
+
180
+ def add_dense_layer_edges(G, weights, layer_idx, threshold_factor, top_k):
181
+ src_prefix = f"L{layer_idx}"
182
+ dst_prefix = f"L{layer_idx + 1}"
183
+ input_nodes = [f"{src_prefix}_{i}" for i in range(weights.shape[0])]
184
+ output_nodes = [f"{dst_prefix}_{j}" for j in range(weights.shape[1])]
185
+
186
+ G.add_nodes_from(input_nodes + output_nodes)
187
+
188
+ abs_weights = np.abs(weights)
189
+ threshold = threshold_factor * np.mean(abs_weights)
190
+ top_k_edges = get_top_k_edges(weights, src_prefix, dst_prefix, top_k)
191
+ top_k_set = set((u, v) for u, v, _ in top_k_edges)
192
+
193
+ for i, src in enumerate(input_nodes):
194
+ for j, dst in enumerate(output_nodes):
195
+ w = weights[i, j]
196
+ if abs(w) > threshold:
197
+ G.add_edge(src, dst, weight=w, highlight=(src, dst) in top_k_set)
198
+
199
+ def compute_layout(G):
200
+ pos = {}
201
+ layer_nodes = {}
202
+
203
+ for node in G.nodes():
204
+ layer_idx = int(node.split("_")[0][1:])
205
+ layer_nodes.setdefault(layer_idx, []).append(node)
206
+
207
+ for layer_idx, nodes in sorted(layer_nodes.items()):
208
+ y_positions = np.linspace(1, -1, len(nodes))
209
+ for y, node in zip(y_positions, nodes):
210
+ pos[node] = (layer_idx * 2, y)
211
+
212
+ return pos
213
+
214
+ def draw_graph(G, pos, title, save_path=None):
215
+ weights = [abs(G[u][v]["weight"]) for u, v in G.edges()]
216
+ if not weights:
217
+ print("No edges to draw.")
218
+ return
219
+
220
+ norm = Normalize(vmin=min(weights), vmax=max(weights))
221
+ cmap = cm.get_cmap("coolwarm")
222
+
223
+ edge_colors = [cmap(norm(G[u][v]["weight"])) for u, v in G.edges()]
224
+ edge_widths = [1.0 + 2.0 * norm(abs(G[u][v]["weight"])) for u, v in G.edges()]
225
+
226
+ fig, ax = plt.subplots(figsize=(12, 8))
227
+
228
+ nx.draw(
229
+ G,
230
+ pos,
231
+ ax=ax,
232
+ with_labels=True,
233
+ node_color="lightgray",
234
+ node_size=1000,
235
+ font_size=8,
236
+ edge_color=edge_colors,
237
+ width=edge_widths,
238
+ arrows=True,
239
+ )
240
+
241
+ ax.set_title(title, fontsize=14)
242
+
243
+ sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
244
+ sm.set_array([])
245
+ plt.colorbar(sm, ax=ax, orientation="vertical", label="Edge Weight")
246
+
247
+ plt.tight_layout()
248
+ if save_path:
249
+ plt.savefig(save_path)
250
+ plt.show()
251
+
252
+ dense_layers = [
253
+ layer
254
+ for layer in self.model._classifier.layers
255
+ if isinstance(layer, tf.keras.layers.Dense)
256
+ ]
257
+
258
+ if len(dense_layers) < 1:
259
+ print("No Dense layers found in classifier.")
260
+ return
261
+
262
+ G = nx.DiGraph()
263
+ for idx, layer in enumerate(dense_layers):
264
+ weights = layer.get_weights()[0]
265
+ add_dense_layer_edges(G, weights, idx, threshold_factor, top_k)
266
+
267
+ pos = compute_layout(G)
268
+ draw_graph(G, pos, "Classifier Dense Layers Graph", save_path)
269
+
270
+ def viz_encoder_decoder_graphs(self, threshold_factor=1.0, top_k=5, save_path=None):
271
+ """
272
+ Visualize Dense layers in self.model.encoder and self.model.decoder as directed graphs.
273
+ """
274
+
275
+ def get_top_k_edges(weights, labels_src, labels_dst_prefix, k):
276
+ flat_weights = np.abs(weights.flatten())
277
+ indices = np.argpartition(flat_weights, -k)[-k:]
278
+ top_k_flat_indices = indices[np.argsort(-flat_weights[indices])]
279
+ top_k_edges = []
280
+ for flat_index in top_k_flat_indices:
281
+ i, j = np.unravel_index(flat_index, weights.shape)
282
+ src_label = labels_src[i] if isinstance(labels_src, list) else f"{labels_src}_{i}"
283
+ dst_label = f"{labels_dst_prefix}_{j}"
284
+ top_k_edges.append((src_label, dst_label, weights[i, j]))
285
+ return top_k_edges
286
+
287
+ def add_layer_to_graph(
288
+ G, weights, labels_src, labels_dst_prefix, x_offset, top_k_set, threshold
289
+ ):
290
+ output_nodes = [f"{labels_dst_prefix}_{j}" for j in range(weights.shape[1])]
291
+
292
+ for node in labels_src + output_nodes:
293
+ if node not in G:
294
+ G.add_node(node, x=x_offset if node in labels_src else x_offset + 1)
295
+
296
+ for i, src in enumerate(labels_src):
297
+ for j, dst in enumerate(output_nodes):
298
+ w = weights[i, j]
299
+ if abs(w) > threshold:
300
+ G.add_edge(src, dst, weight=w, highlight=(src, dst) in top_k_set)
301
+ return output_nodes
302
+
303
+ def layout_graph(G):
304
+ pos = {}
305
+ layers = {}
306
+ for node, data in G.nodes(data=True):
307
+ x = data["x"]
308
+ layers.setdefault(x, []).append(node)
309
+
310
+ for x in sorted(layers):
311
+ nodes = layers[x]
312
+ y_positions = np.linspace(1, -1, len(nodes))
313
+ for y, node in zip(y_positions, nodes):
314
+ pos[node] = (x, y)
315
+ return pos
316
+
317
+ def draw_graph(G, title, ax):
318
+ weights = [abs(G[u][v]["weight"]) for u, v in G.edges()]
319
+ if not weights:
320
+ return
321
+
322
+ norm = Normalize(vmin=min(weights), vmax=max(weights))
323
+ cmap = cm.get_cmap("coolwarm")
324
+
325
+ edge_colors = [cmap(norm(G[u][v]["weight"])) for u, v in G.edges()]
326
+ edge_widths = [1.0 + 2.0 * norm(abs(G[u][v]["weight"])) for u, v in G.edges()]
327
+
328
+ pos = layout_graph(G)
329
+ nx.draw(
330
+ G,
331
+ pos,
332
+ ax=ax,
333
+ with_labels=True,
334
+ node_color="lightgray",
335
+ node_size=1000,
336
+ font_size=8,
337
+ edge_color=edge_colors,
338
+ width=edge_widths,
339
+ arrows=True,
340
+ )
341
+
342
+ ax.set_title(title, fontsize=12)
343
+ sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
344
+ sm.set_array([])
345
+ plt.colorbar(sm, ax=ax, orientation="vertical", label="Edge Weight")
346
+
347
+ def build_graph(layers, label_prefix, input_labels=None):
348
+ G = nx.DiGraph()
349
+ x_offset = 0
350
+ prev_labels = input_labels or [
351
+ f"{label_prefix}0_{i}" for i in range(layers[0].get_weights()[0].shape[0])
352
+ ]
353
+
354
+ for idx, layer in enumerate(layers):
355
+ weights = layer.get_weights()[0]
356
+ label = f"{label_prefix}{idx+1}"
357
+ threshold = threshold_factor * np.mean(np.abs(weights))
358
+ top_k_edges = get_top_k_edges(weights, prev_labels, label, top_k)
359
+ top_k_set = set((src, dst) for src, dst, _ in top_k_edges)
360
+
361
+ prev_labels = add_layer_to_graph(
362
+ G, weights, prev_labels, label, x_offset, top_k_set, threshold
363
+ )
364
+ x_offset += 2
365
+
366
+ return G
367
+
368
+ encoder_layers = [
369
+ l for l in self.model._encoder.layers if isinstance(l, tf.keras.layers.Dense)
370
+ ]
371
+ decoder_layers = [
372
+ l for l in self.model._decoder.layers if isinstance(l, tf.keras.layers.Dense)
373
+ ]
374
+
375
+ if not encoder_layers and not decoder_layers:
376
+ print("No Dense layers found in encoder or decoder.")
377
+ return
378
+
379
+ n_graphs = int(bool(encoder_layers)) + int(bool(decoder_layers))
380
+ fig, axes = plt.subplots(1, n_graphs, figsize=(7 * n_graphs, 6), squeeze=False)
381
+
382
+ col = 0
383
+ if encoder_layers:
384
+ input_labels = (
385
+ self.y_labels
386
+ if self.y_labels
387
+ and len(self.y_labels) == encoder_layers[0].get_weights()[0].shape[0]
388
+ else None
389
+ )
390
+ encoder_graph = build_graph(encoder_layers, "E", input_labels)
391
+ draw_graph(encoder_graph, "Encoder", axes[0][col])
392
+ col += 1
393
+
394
+ if decoder_layers:
395
+ decoder_graph = build_graph(decoder_layers, "D")
396
+ draw_graph(decoder_graph, "Decoder", axes[0][col])
397
+
398
+ fig.suptitle("Encoder & Decoder Dense Layer Graphs", fontsize=15)
399
+ plt.tight_layout(rect=[0, 0, 1, 0.95])
400
+
401
+ if save_path:
402
+ plt.savefig(save_path)
403
+ plt.show()
404
+
405
+ if encoder_layers:
406
+ weights = encoder_layers[0].get_weights()[0]
407
+ importances = np.abs(weights).mean(axis=1)
408
+ sorted_idx = np.argsort(-importances)
409
+ xticks = [
410
+ (
411
+ self.y_labels[i]
412
+ if self.y_labels and len(self.y_labels) == weights.shape[0]
413
+ else f"Input_{i}"
414
+ )
415
+ for i in sorted_idx
416
+ ]
417
+
418
+ plt.figure(figsize=(10, 4))
419
+ plt.bar(range(len(importances)), importances[sorted_idx], color="skyblue")
420
+ plt.xticks(range(len(importances)), xticks, rotation=45, ha="right")
421
+ plt.title("Feature Importances (Encoder Input Layer)", fontsize=13)
422
+ plt.ylabel("Mean |Weight|")
423
+ plt.tight_layout()
424
+ plt.show()
425
+
426
+ def predictor_analyzer(
427
+ self,
428
+ frac: float = None,
429
+ cmap: str = "viridis",
430
+ aspect: str = "auto",
431
+ highlight: bool = True,
432
+ **kwargs,
433
+ ) -> None:
434
+ """
435
+ Analyze the model's predictions and visualize data.
436
+
437
+ Parameters
438
+ ----------
439
+ frac : `float`, optional
440
+ Fraction of data to use for analysis (default is `None`).
441
+ cmap : `str`, optional
442
+ The colormap for visualization (default is `"viridis"`).
443
+ aspect : `str`, optional
444
+ Aspect ratio for the visualization (default is `"auto"`).
445
+ highlight : `bool`, optional
446
+ Whether to highlight the maximum weights (default is `True`).
447
+ **kwargs : `dict`, optional
448
+ Additional keyword arguments for customization.
449
+
450
+ Returns
451
+ -------
452
+ `pd.DataFrame` : The statistical summary of the input data.
453
+ """
454
+ self._viz_weights(cmap=cmap, aspect=aspect, highlight=highlight, **kwargs)
455
+ inputs = self.inputs.copy()
456
+ inputs = self._prepare_inputs(inputs, frac)
457
+ self.y_labels = kwargs.get("y_labels", None)
458
+ encoded, reconstructed = self._encode_decode(inputs)
459
+ self._visualize_data(inputs, reconstructed, cmap, aspect)
460
+ self._prepare_data_for_analysis(inputs, reconstructed, encoded, self.y_labels)
461
+
462
+ try:
463
+ self._get_tsne_repr(inputs, frac)
464
+ self._viz_tsne_repr(c=self.classification)
465
+
466
+ self._viz_radviz(self.data, "class", "Radviz Visualization of Latent Space")
467
+ self._viz_radviz(self.data_input, "class", "Radviz Visualization of Input Data")
468
+ except ValueError:
469
+ warnings.warn(
470
+ "Some functions or processes will not be executed for regression problems.",
471
+ UserWarning,
472
+ )
473
+
474
+ return self._statistics(self.data_input)
475
+
476
+ def _prepare_inputs(self, inputs: np.ndarray, frac: float) -> np.ndarray:
477
+ """
478
+ Prepare the input data, possibly selecting a fraction of it.
479
+
480
+ Parameters
481
+ ----------
482
+ inputs : `np.ndarray`
483
+ The input data.
484
+ frac : `float`
485
+ Fraction of data to use.
486
+
487
+ Returns
488
+ -------
489
+ `np.ndarray` : The prepared input data.
490
+ """
491
+ if frac:
492
+ n = int(frac * self.inputs.shape[0])
493
+ indexes = np.random.choice(np.arange(inputs.shape[0]), n, replace=False)
494
+ inputs = inputs[indexes]
495
+ inputs[np.isnan(inputs)] = 0.0
496
+ return inputs
497
+
498
+ def _encode_decode(self, inputs: np.ndarray) -> tuple:
499
+ """
500
+ Perform encoding and decoding on the input data.
501
+
502
+ Parameters
503
+ ----------
504
+ inputs : `np.ndarray`
505
+ The input data.
506
+
507
+ Returns
508
+ -------
509
+ `tuple` : The encoded and reconstructed data.
510
+ """
511
+ try:
512
+ mean, log_var = self.model._encoder(inputs)
513
+ encoded = sampling(mean, log_var)
514
+ except:
515
+ encoded = self.model._encoder(inputs)
516
+ reconstructed = self.model._decoder(encoded)
517
+ return encoded, reconstructed
518
+
519
    def _visualize_data(
        self, inputs: np.ndarray, reconstructed: np.ndarray, cmap: str, aspect: str
    ) -> None:
        """
        Visualize the original data and the reconstructed data.

        Draws the two matrices side by side as heatmaps; the panels share
        both axes so pan/zoom stays in sync.

        Parameters
        ----------
        inputs : `np.ndarray`
            The input data.
        reconstructed : `np.ndarray`
            The reconstructed data.
        cmap : `str`
            The colormap for visualization.
        aspect : `str`
            Aspect ratio for the visualization.

        Returns
        -------
        `None`
        """
        # Left panel: the raw inputs.
        ax = plt.subplot(1, 2, 1)
        plt.imshow(inputs, cmap=cmap, aspect=aspect)
        plt.colorbar()
        plt.title("Original Data")

        # Right panel: the decoder's reconstruction, sharing axes with the left.
        plt.subplot(1, 2, 2, sharex=ax, sharey=ax)
        plt.imshow(reconstructed, cmap=cmap, aspect=aspect)
        plt.colorbar()
        plt.title("Decoder Layer Reconstruction")
        plt.show()
551
+ def _prepare_data_for_analysis(
552
+ self,
553
+ inputs: np.ndarray,
554
+ reconstructed: np.ndarray,
555
+ encoded: np.ndarray,
556
+ y_labels: List[str],
557
+ ) -> None:
558
+ """
559
+ Prepare data for statistical analysis.
560
+
561
+ Parameters
562
+ ----------
563
+ inputs : `np.ndarray`
564
+ The input data.
565
+ reconstructed : `np.ndarray`
566
+ The reconstructed data.
567
+ encoded : `np.ndarray`
568
+ The encoded data.
569
+ y_labels : `List[str]`
570
+ The labels of features.
571
+
572
+ Returns
573
+ -------
574
+ `None`
575
+ """
576
+ self.classification = (
577
+ self.model._classifier(tf.concat([reconstructed, encoded], axis=1))
578
+ .numpy()
579
+ .argmax(axis=1)
580
+ )
581
+
582
+ self.data = pd.DataFrame(encoded, columns=[f"Feature {i}" for i in range(encoded.shape[1])])
583
+ self.data_input = pd.DataFrame(
584
+ inputs,
585
+ columns=(
586
+ [f"Feature {i}" for i in range(inputs.shape[1])] if y_labels is None else y_labels
587
+ ),
588
+ )
589
+
590
+ self.data["class"] = self.classification
591
+ self.data_input["class"] = self.classification
592
+
593
+ def _get_tsne_repr(self, inputs: np.ndarray = None, frac: float = None) -> None:
594
+ """
595
+ Perform t-SNE dimensionality reduction on the input data.
596
+
597
+ Parameters
598
+ ----------
599
+ inputs : `np.ndarray`
600
+ The input data.
601
+ frac : `float`
602
+ Fraction of data to use.
603
+
604
+ Returns
605
+ -------
606
+ `None`
607
+ """
608
+ if inputs is None:
609
+ inputs = self.inputs.copy()
610
+ if frac:
611
+ n = int(frac * self.inputs.shape[0])
612
+ indexes = np.random.choice(np.arange(inputs.shape[0]), n, replace=False)
613
+ inputs = inputs[indexes]
614
+ inputs[np.isnan(inputs)] = 0.0
615
+ self.latent_representations = inputs @ self.encoder_weights
616
+
617
+ tsne = TSNE(n_components=2)
618
+ self.reduced_data_tsne = tsne.fit_transform(self.latent_representations)
619
+
620
+ def _viz_tsne_repr(self, **kwargs) -> None:
621
+ """
622
+ Visualize the t-SNE representation of the latent space.
623
+
624
+ Parameters
625
+ ----------
626
+ **kwargs : `dict`
627
+ Additional keyword arguments for customization.
628
+
629
+ Returns
630
+ -------
631
+ `None`
632
+ """
633
+ c = kwargs.get("c", None)
634
+ self.colors = (
635
+ kwargs.get("colors", self.sorted_names[: len(np.unique(c))]) if c is not None else None
636
+ )
637
+
638
+ plt.scatter(
639
+ self.reduced_data_tsne[:, 0],
640
+ self.reduced_data_tsne[:, 1],
641
+ cmap=matplotlib.colors.ListedColormap(self.colors) if c is not None else None,
642
+ c=c,
643
+ )
644
+
645
+ if c is not None:
646
+ cb = plt.colorbar()
647
+ loc = np.arange(0, max(c), max(c) / float(len(self.colors)))
648
+ cb.set_ticks(loc)
649
+ cb.set_ticklabels(np.unique(c))
650
+
651
+ plt.title("t-SNE Visualization of Latent Space")
652
+ plt.xlabel("t-SNE 1")
653
+ plt.ylabel("t-SNE 2")
654
+ plt.show()
655
+
656
+ def _viz_radviz(self, data: pd.DataFrame, color_column: str, title: str) -> None:
657
+ """
658
+ Visualize the data using RadViz.
659
+
660
+ Parameters
661
+ ----------
662
+ data : `pd.DataFrame`
663
+ The data to visualize.
664
+ color_column : `str`
665
+ The column to use for coloring.
666
+ title : `str`
667
+ The title of the plot.
668
+
669
+ Returns
670
+ -------
671
+ `None`
672
+ """
673
+ data_normalized = data.copy(deep=True)
674
+ data_normalized.iloc[:, :-1] = (
675
+ 2.0
676
+ * (data_normalized.iloc[:, :-1] - data_normalized.iloc[:, :-1].min())
677
+ / (data_normalized.iloc[:, :-1].max() - data_normalized.iloc[:, :-1].min())
678
+ - 1
679
+ )
680
+ data_normalized.dropna(axis=1, inplace=True)
681
+ radviz(data_normalized, color_column, color=self.colors)
682
+ plt.title(title)
683
+ plt.show()
684
+
685
+ def _viz_weights(
686
+ self, cmap: str = "viridis", aspect: str = "auto", highlight: bool = True, **kwargs
687
+ ) -> None:
688
+ """
689
+ Visualize the encoder layer weights of the model.
690
+
691
+ Parameters
692
+ ----------
693
+ cmap : `str`, optional
694
+ The colormap for visualization (default is `"viridis"`).
695
+ aspect : `str`, optional
696
+ Aspect ratio for the visualization (default is `"auto"`).
697
+ highlight : `bool`, optional
698
+ Whether to highlight the maximum weights (default is `True`).
699
+ **kwargs : `dict`, optional
700
+ Additional keyword arguments for customization.
701
+
702
+ Returns
703
+ -------
704
+ `None`
705
+ """
706
+ title = kwargs.get("title", "Encoder Layer Weights (Dense Layer)")
707
+ y_labels = kwargs.get("y_labels", None)
708
+ cmap_highlight = kwargs.get("cmap_highlight", "Pastel1")
709
+ highlight_mask = np.zeros_like(self.encoder_weights, dtype=bool)
710
+
711
+ plt.imshow(self.encoder_weights, cmap=cmap, aspect=aspect)
712
+ plt.colorbar()
713
+ plt.title(title)
714
+ if y_labels is not None:
715
+ plt.yticks(ticks=np.arange(self.encoder_weights.shape[0]), labels=y_labels)
716
+ if highlight:
717
+ for i, j in enumerate(self.encoder_weights.argmax(axis=1)):
718
+ highlight_mask[i, j] = True
719
+ plt.imshow(
720
+ np.ma.masked_where(~highlight_mask, self.encoder_weights),
721
+ cmap=cmap_highlight,
722
+ alpha=0.5,
723
+ aspect=aspect,
724
+ )
725
+ plt.show()
726
+
727
    def _statistics(self, data_input: pd.DataFrame) -> pd.DataFrame:
        """
        Compute statistical summaries of the input data.

        Groups ``data_input`` by its ``class`` column and aggregates
        mean/min/max/std/median plus the per-column mode, returning the
        combined table transposed (one row per statistic, one column per
        class).

        Parameters
        ----------
        data_input : `pd.DataFrame`
            The data to compute statistics for; must contain a ``class`` column.

        Returns
        -------
        `pd.DataFrame` : The statistical summary of the input data.
        """
        data = data_input.copy(deep=True)

        # Group keys are normalized to strings so numeric and string labels
        # render consistently.
        if not pd.api.types.is_string_dtype(data["class"]):
            data["class"] = data["class"].astype(str)

        data.ffill(inplace=True)
        grouped_data = data.groupby("class")

        numerical_stats = grouped_data.agg(["mean", "min", "max", "std", "median"])
        # Flatten the (feature, statistic) MultiIndex into "feature_stat" names.
        numerical_stats.columns = ["_".join(col).strip() for col in numerical_stats.columns.values]

        def get_mode(x):
            # ``mode`` can return an empty frame (e.g. all-NaN group); fall
            # back to None in that case.
            mode_series = x.mode()
            return mode_series.iloc[0] if not mode_series.empty else None

        # ``include_groups=False`` (pandas >= 2.2) keeps the group key column
        # out of the frame passed to ``get_mode``.
        mode_stats = grouped_data.apply(get_mode, include_groups=False)
        mode_stats.columns = [f"{col}_mode" for col in mode_stats.columns]
        combined_stats = pd.concat([numerical_stats, mode_stats], axis=1)

        return combined_stats.T
760
+
761
+
762
+ ########################################################################################
763
+
764
if __name__ == "__main__":
    # Demo: train an AutoClassifier on the Iris dataset and inspect it.
    import pandas as pd
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import OneHotEncoder

    # Load the dataset into a DataFrame for easy exploration.
    iris = load_iris()
    iris_df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
    iris_df["species"] = iris.target

    # Features as a float32 matrix; keep the column names to label the plots.
    features = iris_df.drop(columns="species")
    y_labels = features.columns
    X = np.asarray(features.values).astype(np.float32)

    # One-hot encode the species labels.
    y = iris_df["species"].values
    encoder = OneHotEncoder()
    y = encoder.fit_transform(y.reshape(-1, 1)).toarray()
    y = np.asarray(y).astype(np.float32)

    model = AutoClassifier(
        input_shape_parm=X.shape[1],
        num_classes=3,
        units=27,
        activation="tanh",
        num_layers=2,
        dropout=0.2,
    )
    model.compile(
        optimizer="adam",
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=[tf.keras.metrics.F1Score(threshold=0.5)],
    )
    model.fit(X, y, epochs=50, validation_split=0.2)

    # Run the full insight pipeline plus a few standalone visualizations.
    insights = GetInsights(model, X)
    summary = insights.predictor_analyzer(frac=1.0, y_labels=y_labels)
    insights._get_tsne_repr()
    insights._viz_tsne_repr()
    insights._viz_tsne_repr(c=iris_df["species"])
    insights._viz_weights()
    print(summary)