likelihood 1.2.21.tar.gz → 1.2.22.tar.gz
This diff shows the changes between two package versions as published to their public registry; it is provided for informational purposes only.
- {likelihood-1.2.21 → likelihood-1.2.22}/PKG-INFO +1 -1
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/graph/nn.py +13 -11
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood.egg-info/PKG-INFO +1 -1
- {likelihood-1.2.21 → likelihood-1.2.22}/LICENSE +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/README.md +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/__init__.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/graph/__init__.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/graph/graph.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/main.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/__init__.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/deep/__init__.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/deep/autoencoders.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/regression.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/simulation.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/models/utils.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/tools/__init__.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/tools/numeric_tools.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood/tools/tools.py +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood.egg-info/SOURCES.txt +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood.egg-info/dependency_links.txt +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood.egg-info/requires.txt +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/likelihood.egg-info/top_level.txt +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/setup.cfg +0 -0
- {likelihood-1.2.21 → likelihood-1.2.22}/setup.py +0 -0
likelihood/graph/nn.py (+13 -11):

```diff
@@ -1,9 +1,14 @@
+import os
+
+os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
+import logging
 import warnings
 from typing import List, Tuple
 
 import numpy as np
 import pandas as pd
 import tensorflow as tf
+from IPython.display import clear_output
 from numpy import ndarray
 from pandas.core.frame import DataFrame
 from sklearn.metrics import f1_score
```
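The ordering here is the point of the hunk: TensorFlow reads `TF_ENABLE_ONEDNN_OPTS` once, at import time, so the `os.environ` assignment only takes effect because it precedes `import tensorflow`. A minimal, self-contained sketch of the pattern (the version print is just illustrative):

```python
import os

# Must run before TensorFlow is imported: the flag is read at import time.
# "0" turns off oneDNN custom ops, trading some CPU speed for numerically
# consistent results across runs.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"

import tensorflow as tf  # noqa: E402 -- deliberately after the env var

print(tf.__version__)
```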
```diff
@@ -11,6 +16,10 @@ from sklearn.model_selection import train_test_split
 
 from likelihood.tools import generate_feature_yaml
 
+logging.getLogger("tensorflow").setLevel(logging.ERROR)
+
+tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+
 
 def compare_similarity(arr1: ndarray, arr2: ndarray) -> int:
     """Compares the similarity between two arrays of categories.
```
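These two calls quiet TensorFlow's Python-side loggers only; messages from the C++ runtime take a separate path. A hedged sketch of a fuller mute using `TF_CPP_MIN_LOG_LEVEL`, which is not part of this diff:

```python
import os

# C++ backend verbosity: 0 = all, 1 = drop INFO, 2 = drop INFO+WARNING,
# 3 = errors only. Like TF_ENABLE_ONEDNN_OPTS, set it before the import.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import logging

import tensorflow as tf

# Python-side loggers, exactly as in the hunk above.
logging.getLogger("tensorflow").setLevel(logging.ERROR)
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
```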
```diff
@@ -180,14 +189,6 @@ class VanillaGNN(tf.keras.Model):
         self.gnn2 = VanillaGNNLayer(self.dim_h, self.dim_h)
         self.gnn3 = VanillaGNNLayer(self.dim_h, self.dim_out)
 
-    def build(self, input_shape):
-        super(VanillaGNN, self).build(input_shape)
-        dummy_input = tf.keras.Input(shape=input_shape[1:])
-        dummy_adjacency = tf.sparse.SparseTensor(
-            indices=[[0, 0]], values=[1.0], dense_shape=[input_shape[0], input_shape[0]]
-        )
-        _ = self(dummy_input, dummy_adjacency)
-
     def call(self, x, adjacency):
         h = self.gnn1(x, adjacency)
         h = tf.nn.tanh(h)
```
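The deleted `build` override tried to force variable creation by calling the model on a `tf.keras.Input` (a symbolic tensor, not data) and a dummy sparse adjacency, which is fragile whenever Keras passes an `input_shape` the override did not anticipate. Subclassed models need none of this: variables are created lazily on the first real call. A self-contained sketch with a hypothetical stand-in class, not the package's `VanillaGNN`:

```python
import tensorflow as tf


class LazyModel(tf.keras.Model):
    """Hypothetical stand-in for a subclassed model with no build() override."""

    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(8)

    def call(self, x):
        return self.dense(x)


model = LazyModel()
print(len(model.weights))    # 0 -- nothing built yet
_ = model(tf.zeros((2, 4)))  # first real call creates the variables
print(len(model.weights))    # 2 -- kernel and bias now exist
```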
```diff
@@ -289,10 +290,11 @@ class VanillaGNN(tf.keras.Model):
             train_losses.append(train_loss)
             train_f1_scores.append(train_f1)
 
-            if epoch %
+            if epoch % 5 == 0:
                 val_loss, val_f1 = self.evaluate(X_test, adjacency_test, y_test)
                 val_losses.append(val_loss)
                 val_f1_scores.append(val_f1)
+                clear_output(wait=True)
                 print(
                     f"Epoch {epoch:>3} | Train Loss: {train_loss:.3f} | Train F1: {train_f1:.3f} | Val Loss: {val_loss:.3f} | Val F1: {val_f1:.3f}"
                 )
```
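`clear_output(wait=True)` is the IPython display helper the new import brings in: it clears the cell's output, and `wait=True` delays the clear until the next output arrives, so in a notebook the progress line updates in place instead of accumulating. An illustrative loop with made-up numbers (requires IPython, the same dependency the diff adds):

```python
import time

from IPython.display import clear_output

for epoch in range(50):
    time.sleep(0.05)  # stand-in for a training step
    if epoch % 5 == 0:
        clear_output(wait=True)  # clear happens only once new output lands
        print(f"Epoch {epoch:>3} | Train Loss: {1.0 / (epoch + 1):.3f}")
```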
```diff
@@ -327,9 +329,9 @@ if __name__ == "__main__":
     model = VanillaGNN(dim_in=data.x.shape[1], dim_h=8, dim_out=len(iris_df["species"].unique()))
     print("Before training F1:", model.test(data))
     model.fit(data, epochs=200, batch_size=32, test_size=0.5)
-    model.save("./best_model
+    model.save("./best_model", save_format="tf")
     print("After training F1:", model.test(data))
-    best_model = tf.keras.models.load_model("./best_model
+    best_model = tf.keras.models.load_model("./best_model")
 
     print("After loading F1:", best_model.test(data))
     df_results = pd.DataFrame()
```
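A subclassed model has no architecture config for the HDF5 format to serialize, so the SavedModel format (`save_format="tf"`) is what makes this round trip work in TF 2.x. One caveat worth hedging: `tf.keras.models.load_model` on such a SavedModel restores the traced `call`, but custom Python methods (like `test` here) come back only if the original class is importable or registered at load time. A sketch with a hypothetical stand-in class:

```python
import tensorflow as tf


class TinyModel(tf.keras.Model):
    # Hypothetical stand-in for a subclassed model such as VanillaGNN.
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(3)

    def call(self, x):
        return self.dense(x)


model = TinyModel()
_ = model(tf.zeros((2, 4)))                   # build variables before saving

model.save("./demo_model", save_format="tf")  # SavedModel directory, not .h5
restored = tf.keras.models.load_model("./demo_model")
print(restored(tf.zeros((2, 4))).shape)       # the traced call still works
```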