pyerualjetwork 4.1.5__py3-none-any.whl → 4.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyerualjetwork/__init__.py +4 -3
- pyerualjetwork/activation_functions_cuda.py +1 -1
- pyerualjetwork/data_operations.py +41 -80
- pyerualjetwork/data_operations_cuda.py +45 -92
- pyerualjetwork/memory_operations.py +171 -60
- pyerualjetwork/metrics_cuda.py +3 -4
- pyerualjetwork/model_operations.py +4 -5
- pyerualjetwork/model_operations_cuda.py +7 -6
- pyerualjetwork/plan.py +35 -23
- pyerualjetwork/plan_cuda.py +113 -81
- pyerualjetwork/visualizations.py +147 -125
- pyerualjetwork/visualizations_cuda.py +160 -130
- {pyerualjetwork-4.1.5.dist-info → pyerualjetwork-4.1.6.dist-info}/METADATA +25 -9
- pyerualjetwork-4.1.6.dist-info/RECORD +24 -0
- pyerualjetwork-4.1.5.dist-info/RECORD +0 -24
- {pyerualjetwork-4.1.5.dist-info → pyerualjetwork-4.1.6.dist-info}/WHEEL +0 -0
- {pyerualjetwork-4.1.5.dist-info → pyerualjetwork-4.1.6.dist-info}/top_level.txt +0 -0
pyerualjetwork/visualizations.py
CHANGED
@@ -68,131 +68,117 @@ def draw_neural_web(W, ax, G, return_objs=False):
        return art1, art2, art3


-def draw_model_architecture(model_name, model_path=''
+def draw_model_architecture(model_name, model_path=''):
    """
-
-
-
-
-
-
-
-    model_path
-
-
-
-
-
-
-        'detailed': Shows a more comprehensive view, including layer details and parameters.
-        Default is 'basic'.
-
-    Returns
-    -------
-    None
-        Draws and displays the architecture of the specified model.
-
-
-    Examples
-    --------
-    >>> draw_model_architecture("MyModel", "path/to/model", style='detailed')
+    The `draw_model_architecture` function visualizes the architecture of a neural network model with
+    multiple inputs based on activation functions.
+
+    :param model_name: The `model_name` parameter in the `draw_model_architecture` function is used to
+    specify the name of the neural network model whose architecture you want to visualize. This function
+    visualizes the architecture of a neural network model with multiple inputs based on activation
+    functions
+    :param model_path: The `model_path` parameter in the `draw_model_architecture` function is used to
+    specify the path where the neural network model is saved. If the model is saved in a specific
+    directory or file location, you can provide that path as a string when calling the function. If the
+    model is saved
+    """
+    """
+    Visualizes the architecture of a neural network model with multiple inputs based on activation functions.
    """
-
-    from .model_operations import load_model
+
+    from .model_operations import load_model, get_scaler, get_act_pot, get_weights

    model = load_model(model_name=model_name, model_path=model_path)

    W = model[get_weights()]
    activation_potentiation = model[get_act_pot()]
    scaler_params = model[get_scaler()]
-
-    text_1 = f"Input Shape:\n{W.shape[1]}"
-    text_2 = f"Output Shape:\n{W.shape[0]}"
-
-    if scaler_params is None:
-        bottom_left_text = 'Standard Scaler=No'
-    else:
-        bottom_left_text = 'Standard Scaler=Yes'
-
-    if len(activation_potentiation) != 1 or (len(activation_potentiation) == 1 and activation_potentiation[0] != 'linear'):
-
-        bottom_left_text_1 = f'Aggregation Layers(Aggregates All Conversions)={len(activation_potentiation)}'
-
-    else:
-
-        bottom_left_text_1 = 'Aggregation Layers(Aggregates All Conversions)=0'
-
-    bottom_left_text_2 = 'Potentiation Layer(Fully Connected)=1'
-
-    if scaler_params is None:
-        bottom_left_text = 'Standard Scaler=No'
-    else:
-        bottom_left_text = 'Standard Scaler=Yes'
-
-    num_middle_axes = len(activation_potentiation)
-
-    if style == 'detailed':
-
-        col = 1
-
-    elif style == 'basic':

-
-
-
-
-    fig.suptitle("Model Architecture", fontsize=16, fontweight='bold')
-
-    for i, activation in enumerate(activation_potentiation):
-        x = np.linspace(-100, 100, 100)
-        translated_x_train = draw_activations(x, activation)
-        y = translated_x_train
-
-        axes[i].plot(x, y, color='b', markersize=6, linewidth=2, label='Activations Over Depth')
-        axes[i].set_title(activation_potentiation[i])
+    # Calculate dimensions based on number of activation functions
+    num_activations = len(activation_potentiation)
+    input_groups = num_activations  # Number of input groups equals number of activations
+    num_inputs = W.shape[1]

-
-
-
-
-
-
+    # Create figure
+    fig = plt.figure(figsize=(15, 10))
+
+    # Calculate positions for nodes
+    def get_node_positions():
+        positions = {}

-
-
-
-
-
+        # Input layer positions
+        total_height = 0.8  # maximum vertical extent
+        group_height = total_height / input_groups  # vertical space reserved for each group
+        input_spacing = min(group_height / (num_inputs + 1), 0.1)  # spacing between consecutive inputs
+
+        for group in range(input_groups):
+            group_start_y = 0.9 - (group * group_height)  # starting y coordinate of the group
+            for i in range(num_inputs):
+                y_pos = group_start_y - ((i + 1) * input_spacing)
+                positions[f'input_{group}_{i}'] = (0.2, y_pos)
+
+        # Aggregation layer positions
+        agg_spacing = total_height / (num_inputs + 1)
+        for i in range(num_inputs):
+            positions[f'summed_{i}'] = (0.5, 0.9 - ((i + 1) * agg_spacing))
+
+        # Output layer positions
+        output_spacing = total_height / (W.shape[0] + 1)
+        for i in range(W.shape[0]):
+            positions[f'output_{i}'] = (0.8, 0.9 - ((i + 1) * output_spacing))
+
+        return positions
+
+    # Draw the network
+    pos = get_node_positions()
+
+    # Draw nodes
+    for group in range(input_groups):
+        # Draw input nodes
+        for i in range(num_inputs):
+            plt.plot(*pos[f'input_{group}_{i}'], 'o', color='lightgreen', markersize=20)
+            plt.text(pos[f'input_{group}_{i}'][0] - 0.05, pos[f'input_{group}_{i}'][1],
+                     f'Input #{i+1} ({activation_potentiation[group]})', ha='right', va='center')
+
+            # Draw connections from input to summed input directly
+            plt.plot([pos[f'input_{group}_{i}'][0], pos[f'summed_{i}'][0]],
+                     [pos[f'input_{group}_{i}'][1], pos[f'summed_{i}'][1]], 'k-')
+            # Draw aggregation nodes
+            if group == 0:
+                plt.plot(*pos[f'summed_{i}'], 'o', color='lightgreen', markersize=20)
+                plt.text(pos[f'summed_{i}'][0], pos[f'summed_{i}'][1] + 0.02,
+                         f'Summed\nInput #{i+1}', ha='center', va='bottom')
+
+    # Draw output nodes and connections
+    for i in range(W.shape[0]):
+        plt.plot(*pos[f'output_{i}'], 'o', color='gold', markersize=20)
+        plt.text(pos[f'output_{i}'][0] + 0.05, pos[f'output_{i}'][1],
+                 f'Output #{i+1}', ha='left', va='center', color='purple')
+
+        # Connect all aggregation nodes to each output
+        for group in range(num_inputs):
+            plt.plot([pos[f'summed_{group}'][0], pos[f'output_{i}'][0]],
+                     [pos[f'summed_{group}'][1], pos[f'output_{i}'][1]], 'k-')

-
+    # Add labels and annotations
+    plt.text(0.2, 0.95, 'Input Layer', ha='center', va='bottom', fontsize=12)
+    plt.text(0.5, 0.95, 'Aggregation\nLayer', ha='center', va='bottom', fontsize=12)
+    plt.text(0.8, 0.95, 'Output Layer', ha='center', va='bottom', fontsize=12)

-
-
-
-
-
-
-
-
-    axes[num_middle_axes].set_xlim(0, 1)
-    axes[num_middle_axes].set_ylim(0, 1)
-    axes[num_middle_axes].axis('off')
-
-    circle2 = plt.Circle((0.5, 0.5), 0.4, color='lightcoral', ec='black', lw=1.5)
-    axes[-1].add_patch(circle2)
-    axes[-1].text(0.5, 0.5, text_2, ha='center', va='center', fontsize=12)
-    axes[-1].set_xlim(0, 1)
-    axes[-1].set_ylim(0, 1)
-    axes[-1].axis('off')
-
-
-    fig.text(0.01, 0, bottom_left_text, ha='left', va='bottom', fontsize=10)
-    fig.text(0.01, 0.04, bottom_left_text_1, ha='left', va='bottom', fontsize=10)
-    fig.text(0.01, 0.08, bottom_left_text_2, ha='left', va='bottom', fontsize=10)
+    # Remove axes
+    plt.axis('off')
+
+    # Add model information
+    if scaler_params is None:
+        plt.text(0.95, 0.05, 'Standard Scaler=No', fontsize=10, ha='right', va='bottom')
+    else:
+        plt.text(0.95, 0.05, 'Standard Scaler=Yes', fontsize=10, ha='right', va='bottom')

+    # Add model architecture title
+    plt.text(0.95, 0.1, f"PLAN Model Architecture: {model_name}", fontsize=12, ha='right', va='bottom', fontweight='bold')
    plt.tight_layout()
    plt.show()
-
+


def draw_activations(x_train, activation):

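The rewritten function also drops the old `style` parameter (its docstring entry and the `style == 'detailed'` branch are both removed), so 4.1.5-era calls like the deleted docstring example no longer match the signature. A minimal usage sketch against the 4.1.6 signature; the model name and path are placeholders, assuming a PLAN model was saved earlier with the library's model_operations helpers:

```python
from pyerualjetwork.visualizations import draw_model_architecture

# "MyModel" and "model/" are hypothetical values; load_model() resolves
# them internally, then the input, aggregation, and output layers are
# rendered with matplotlib.
draw_model_architecture(model_name="MyModel", model_path="model/")
```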
@@ -479,7 +465,7 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):
                         np.arange(y_min, y_max, h))

    grid = np.c_[xx.ravel(), yy.ravel()]
-    grid_full = np.zeros((grid.shape[0], x.shape[1]))
+    grid_full = np.zeros((grid.shape[0], x.shape[1]), dtype=np.float32)
    grid_full[:, feature_indices] = grid

    Z = [None] * len(grid_full)
@@ -487,7 +473,7 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):
    for i in range(len(grid_full)):
        Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))

-    Z = np.array(Z)
+    Z = np.array(Z, dtype=np.int32)
    Z = Z.reshape(xx.shape)

    if ax is None:
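The two dtype pins above (`np.float32` for the prediction grid, `np.int32` for the class labels) match the memory-focused work elsewhere in this release (see `memory_operations.py`, +171 lines): NumPy defaults to float64/int64, which doubles the footprint of large mesh buffers for no visual benefit. A standalone sketch with illustrative sizes, not taken from the package:

```python
import numpy as np

# A 250,000-point decision-boundary mesh over 64 features, roughly the
# kind of buffer plot_decision_boundary allocates for a wide dataset.
grid_full_64 = np.zeros((250_000, 64))                    # float64 default
grid_full_32 = np.zeros((250_000, 64), dtype=np.float32)  # 4.1.6 style

print(grid_full_64.nbytes // 2**20, "MiB")  # 122 MiB
print(grid_full_32.nbytes // 2**20, "MiB")  # 61 MiB
```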
@@ -513,11 +499,11 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=None):

    except:

-        art1_1 = ax
-        art1_2 = ax
-        ax
-        ax
-        ax
+        art1_1 = ax.contourf(xx, yy, Z, alpha=0.8)
+        art1_2 = ax.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+        ax.set_xlabel(f'Feature {0 + 1}')
+        ax.set_ylabel(f'Feature {1 + 1}')
+        ax.set_title('Decision Boundary')


    return art1_1, art1_2
@@ -558,7 +544,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
            if y_preds[i] == cls:
                class_points.append(X_pca[i])

-        class_points = np.array(class_points)
+        class_points = np.array(class_points, dtype=y.dtype)


        if len(class_points) > 2:
@@ -573,8 +559,8 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):

    plt.draw()

-
-def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):
+
+def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):

    for j in range(len(class_count)):

@@ -601,15 +587,40 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):

    return artist5

+def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=False, loss=False):
+
+    for j in range(class_count):
+
+        if acc != False and loss != False:
+            suptitle_info = ' Accuracy:' + str(acc) + '\n' + '\nNeurons Memory:'
+        else:
+            suptitle_info = 'Neurons Memory:'
+
+        mat = LTPW[j,:].reshape(row, col)
+
+        title_info = f'{j+1}. Neuron'
+
+        art5 = ax1[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+        ax1[j].set_aspect('equal')
+        ax1[j].set_xticks([])
+        ax1[j].set_yticks([])
+        ax1[j].set_title(title_info)
+
+
+        artist5.append([art5])
+
+    fig1.suptitle(suptitle_info, fontsize=16)
+

def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
    """Initializes the visualization setup based on the parameters."""
    from .data_operations import find_closest_factors
    visualization_objects = {}

-    if show_training:
+    if show_training or neurons_history:
        if not val:
-            raise ValueError("For showing training, 'val' parameter must be True.")
+            raise ValueError("For showing training or neurons history, 'val' parameter must be True.")

        G = nx.Graph()
        fig, ax = plt.subplots(2, 2)
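The new `update_neuron_history` renders each neuron's weight row as a small image and appends one animation frame per call. A minimal sketch of driving it by hand with random stand-in weights (dimensions are illustrative; inside the library it is called from the fit loop):

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation
from pyerualjetwork.visualizations import update_neuron_history

class_count, row, col = 3, 8, 8          # illustrative sizes
fig1, ax1 = plt.subplots(1, class_count)
artist5 = []

for _ in range(5):                       # five fake weight snapshots
    LTPW = np.random.rand(class_count, row * col)
    update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1)

ani = ArtistAnimation(fig1, artist5, interval=200, blit=False)
plt.show()
```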
@@ -626,7 +637,7 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):

    if neurons_history:
        row, col = find_closest_factors(len(x_train[0]))
-        fig1, ax1 = plt.subplots(1, len(
+        fig1, ax1 = plt.subplots(1, len(y_train[0]), figsize=(18, 14))
        visualization_objects.update({
            'fig1': fig1,
            'ax1': ax1,
@@ -637,6 +648,16 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):

    return visualization_objects

+
+
+def update_neural_web_for_fit(W, ax, G, artist):
+    """
+    The function `update_neural_web_for_fit` updates a neural web visualization for fitting.
+    """
+    art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
+    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+    artist.append(art5_list)
+

def update_weight_visualization_for_fit(ax, LTPW, artist2):
    """Updates the weight visualization plot."""
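The flattening in `update_neural_web_for_fit` exists because `draw_neural_web` returns its third result (`art5_3`) as a dict of artists, while `ArtistAnimation` expects every frame to be one flat list of artists. A sketch of calling the helper directly, with random weights standing in for successive fit snapshots (shapes are illustrative):

```python
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from pyerualjetwork.visualizations import update_neural_web_for_fit

fig, ax = plt.subplots()
G = nx.Graph()
artist = []  # accumulates one flat artist list per frame

for _ in range(2):
    W = np.random.rand(3, 4)  # stand-in weight matrix
    update_neural_web_for_fit(W=W, ax=ax, G=G, artist=artist)
```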
@@ -673,11 +694,12 @@ def update_validation_history_for_fit(ax, val_list, artist3):
def display_visualization_for_fit(fig, artist_list, interval):
    """Displays the animation for the given artist list."""
    ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
+    return ani
+
+def show():
    plt.tight_layout()
    plt.show()

-
-
def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
    """Initialize all visualization components"""
    from .data_operations import find_closest_factors
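Returning the animation object here is not cosmetic: matplotlib keeps only a weak reference to an `ArtistAnimation`, so if the caller discards the return value the animation can be garbage-collected and the figure freezes on its first frame. Together with the new module-level `show()`, the intended call pattern is roughly as follows (`fig` and `artist_list` come from the surrounding fit visualization code):

```python
# Hold the returned animation for as long as the window is open.
ani = display_visualization_for_fit(fig, artist_list, interval=100)
show()
```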
@@ -765,7 +787,7 @@ def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc
    if 'neurons' in viz_objects:
        neurons = viz_objects['neurons']
        for _ in range(10):
-            neurons['artists'] =
+            neurons['artists'] = update_neuron_history_for_learner(
                np.copy(best_weights),
                neurons['ax'],
                neurons['row'],