pyerualjetwork 3.3.4__py3-none-any.whl → 4.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -68,131 +68,117 @@ def draw_neural_web(W, ax, G, return_objs=False):
     return art1, art2, art3
 
 
-def draw_model_architecture(model_name, model_path='', style='basic'):
+def draw_model_architecture(model_name, model_path=''):
     """
-    Visualizes the architecture of a neural network model.
-
-    Parameters
-    ----------
-    model_name : str
-        The name of the model to be visualized, which will be displayed in the title or label.
-
-    model_path : str
-        The file path to the model, from which the architecture is loaded. Default is ''
-
-    style : str, optional
-        The style of the visualization.
-        Options:
-        - 'basic': Displays a simplified view of the model architecture.
-        - 'detailed': Shows a more comprehensive view, including layer details and parameters.
-        Default is 'basic'.
-
-    Returns
-    -------
-    None
-        Draws and displays the architecture of the specified model.
-
-
-    Examples
-    --------
-    >>> draw_model_architecture("MyModel", "path/to/model", style='detailed')
+    Visualizes the architecture of a neural network model with multiple
+    inputs based on activation functions.
+
+    :param model_name: Name of the neural network model whose architecture
+        is to be visualized.
+    :param model_path: Path to the directory or file the model was saved in,
+        given as a string. Default is ''.
     """
-    from .plan import get_scaler, get_act_pot, get_weights
-    from .model_operations import load_model
+
+    from .model_operations import load_model, get_scaler, get_act_pot, get_weights
 
     model = load_model(model_name=model_name, model_path=model_path)
 
     W = model[get_weights()]
     activation_potentiation = model[get_act_pot()]
     scaler_params = model[get_scaler()]
-
-    text_1 = f"Input Shape:\n{W.shape[1]}"
-    text_2 = f"Output Shape:\n{W.shape[0]}"
-
-    if scaler_params is None:
-        bottom_left_text = 'Standard Scaler=No'
-    else:
-        bottom_left_text = 'Standard Scaler=Yes'
-
-    if len(activation_potentiation) != 1 or (len(activation_potentiation) == 1 and activation_potentiation[0] != 'linear'):
-
-        bottom_left_text_1 = f'Aggregation Layers(Aggregates All Conversions)={len(activation_potentiation)}'
-
-    else:
-
-        bottom_left_text_1 = 'Aggregation Layers(Aggregates All Conversions)=0'
-
-    bottom_left_text_2 = 'Potentiation Layer(Fully Connected)=1'
-
-    if scaler_params is None:
-        bottom_left_text = 'Standard Scaler=No'
-    else:
-        bottom_left_text = 'Standard Scaler=Yes'
-
-    num_middle_axes = len(activation_potentiation)
-
-    if style == 'detailed':
-
-        col = 1
-
-    elif style == 'basic':
 
-        col = 2
+    # Calculate dimensions based on number of activation functions
+    num_activations = len(activation_potentiation)
+    input_groups = num_activations  # Number of input groups equals number of activations
+    num_inputs = W.shape[1]
 
-    fig, axes = plt.subplots(1, num_middle_axes + col, figsize=(5 * (num_middle_axes + 2), 5))
-
-    fig.suptitle("Model Architecture", fontsize=16, fontweight='bold')
-
-    for i, activation in enumerate(activation_potentiation):
-        x = np.linspace(-100, 100, 100)
-        translated_x_train = draw_activations(x, activation)
-        y = translated_x_train
-
-        axes[i].plot(x, y, color='b', markersize=6, linewidth=2, label='Activations Over Depth')
-        axes[i].set_title(activation_potentiation[i])
-
-        axes[i].spines['top'].set_visible(False)
-        axes[i].spines['right'].set_visible(False)
-        axes[i].spines['left'].set_visible(False)
-        axes[i].spines['bottom'].set_visible(False)
-        axes[i].get_xaxis().set_visible(False)
-        axes[i].get_yaxis().set_visible(False)
+    # Create figure
+    fig = plt.figure(figsize=(15, 10))
+
+    # Calculate positions for nodes
+    def get_node_positions():
+        positions = {}
 
-
-        if i < num_middle_axes - 1:
-            axes[i].annotate('', xy=(1.05, 0.5), xytext=(0.95, 0.5),
-                             xycoords='axes fraction', textcoords='axes fraction',
-                             arrowprops=dict(arrowstyle="->", color='black', lw=1.5))
+        # Input layer positions
+        total_height = 0.8  # Maximum vertical span
+        group_height = total_height / input_groups  # Vertical span reserved for each group
+        input_spacing = min(group_height / (num_inputs + 1), 0.1)  # Spacing between inputs
+
+        for group in range(input_groups):
+            group_start_y = 0.9 - (group * group_height)  # Starting y coordinate of the group
+            for i in range(num_inputs):
+                y_pos = group_start_y - ((i + 1) * input_spacing)
+                positions[f'input_{group}_{i}'] = (0.2, y_pos)
+
+        # Aggregation layer positions
+        agg_spacing = total_height / (num_inputs + 1)
+        for i in range(num_inputs):
+            positions[f'summed_{i}'] = (0.5, 0.9 - ((i + 1) * agg_spacing))
+
+        # Output layer positions
+        output_spacing = total_height / (W.shape[0] + 1)
+        for i in range(W.shape[0]):
+            positions[f'output_{i}'] = (0.8, 0.9 - ((i + 1) * output_spacing))
+
+        return positions
+
+    # Draw the network
+    pos = get_node_positions()
+
+    # Draw nodes
+    for group in range(input_groups):
+        # Draw input nodes
+        for i in range(num_inputs):
+            plt.plot(*pos[f'input_{group}_{i}'], 'o', color='lightgreen', markersize=20)
+            plt.text(pos[f'input_{group}_{i}'][0] - 0.05, pos[f'input_{group}_{i}'][1],
+                     f'Input #{i+1} ({activation_potentiation[group]})', ha='right', va='center')
+
+            # Draw connections from input to summed input directly
+            plt.plot([pos[f'input_{group}_{i}'][0], pos[f'summed_{i}'][0]],
+                     [pos[f'input_{group}_{i}'][1], pos[f'summed_{i}'][1]], 'k-')
+            # Draw aggregation nodes
+            if group == 0:
+                plt.plot(*pos[f'summed_{i}'], 'o', color='lightgreen', markersize=20)
+                plt.text(pos[f'summed_{i}'][0], pos[f'summed_{i}'][1] + 0.02,
+                         f'Summed\nInput #{i+1}', ha='center', va='bottom')
+
+    # Draw output nodes and connections
+    for i in range(W.shape[0]):
+        plt.plot(*pos[f'output_{i}'], 'o', color='gold', markersize=20)
+        plt.text(pos[f'output_{i}'][0] + 0.05, pos[f'output_{i}'][1],
+                 f'Output #{i+1}', ha='left', va='center', color='purple')
+
+        # Connect all aggregation nodes to each output
+        for group in range(num_inputs):
+            plt.plot([pos[f'summed_{group}'][0], pos[f'output_{i}'][0]],
+                     [pos[f'summed_{group}'][1], pos[f'output_{i}'][1]], 'k-')
 
-    if style == 'detailed':
+    # Add labels and annotations
+    plt.text(0.2, 0.95, 'Input Layer', ha='center', va='bottom', fontsize=12)
+    plt.text(0.5, 0.95, 'Aggregation\nLayer', ha='center', va='bottom', fontsize=12)
+    plt.text(0.8, 0.95, 'Output Layer', ha='center', va='bottom', fontsize=12)
 
-        G = nx.Graph()
-        draw_neural_web(W=W, ax=axes[num_middle_axes], G=G)
-
-    elif style == 'basic':
-
-        circle1 = plt.Circle((0.5, 0.5), 0.4, color='skyblue', ec='black', lw=1.5)
-        axes[num_middle_axes].add_patch(circle1)
-        axes[num_middle_axes].text(0.5, 0.5, text_1, ha='center', va='center', fontsize=12)
-        axes[num_middle_axes].set_xlim(0, 1)
-        axes[num_middle_axes].set_ylim(0, 1)
-        axes[num_middle_axes].axis('off')
-
-        circle2 = plt.Circle((0.5, 0.5), 0.4, color='lightcoral', ec='black', lw=1.5)
-        axes[-1].add_patch(circle2)
-        axes[-1].text(0.5, 0.5, text_2, ha='center', va='center', fontsize=12)
-        axes[-1].set_xlim(0, 1)
-        axes[-1].set_ylim(0, 1)
-        axes[-1].axis('off')
-
-
-    fig.text(0.01, 0, bottom_left_text, ha='left', va='bottom', fontsize=10)
-    fig.text(0.01, 0.04, bottom_left_text_1, ha='left', va='bottom', fontsize=10)
-    fig.text(0.01, 0.08, bottom_left_text_2, ha='left', va='bottom', fontsize=10)
+    # Remove axes
+    plt.axis('off')
+
+    # Add model information
+    if scaler_params is None:
+        plt.text(0.95, 0.05, 'Standard Scaler=No', fontsize=10, ha='right', va='bottom')
+    else:
+        plt.text(0.95, 0.05, 'Standard Scaler=Yes', fontsize=10, ha='right', va='bottom')
 
+    # Add model architecture title
+    plt.text(0.95, 0.1, f"PLAN Model Architecture: {model_name}", fontsize=12, ha='right', va='bottom', fontweight='bold')
 
     plt.tight_layout()
     plt.show()
-
+
 
 
 def draw_activations(x_train, activation):
 
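Note: the 4.x rewrite drops the `style` parameter, so 3.x callers that passed `style='detailed'` must update their calls. A minimal usage sketch against the new signature; the module path and the model/file names here are assumptions for illustration, not taken from the diff:

    from pyerualjetwork.visualizations import draw_model_architecture  # module path assumed

    # Draws the input / aggregation / output layout for a saved PLAN model.
    draw_model_architecture(model_name="my_model", model_path="models/")
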
@@ -332,10 +318,12 @@ def draw_activations(x_train, activation):
 
     elif activation == 'spiral':
         result = af.spiral_activation(x_train)
-
-    return result
-
-
+
+    try: return result
+    except:
+        print('WARNING: error in drawing some activation.')
+        return x_train
+
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
 
     from .metrics import metrics, confusion_matrix, roc_curve
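Note on the `try: return result` change in `draw_activations`: when `activation` matches no branch, `result` is never assigned, so the bare `except` is effectively catching a `NameError` and falling back to the untransformed input. A standalone sketch of the same guard pattern (illustrative only; `safe_activation` is a hypothetical helper, not part of the package):

    import numpy as np

    def safe_activation(x, activation):
        # If no branch assigns `result`, referencing it raises NameError,
        # which the fallback catches so the plot degrades to the raw input.
        if activation == 'relu':
            result = np.maximum(0, x)
        elif activation == 'tanh':
            result = np.tanh(x)
        try:
            return result
        except NameError:
            print('WARNING: error in drawing some activation.')
            return x
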
@@ -479,7 +467,7 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=Non
                          np.arange(y_min, y_max, h))
 
     grid = np.c_[xx.ravel(), yy.ravel()]
-    grid_full = np.zeros((grid.shape[0], x.shape[1]))
+    grid_full = np.zeros((grid.shape[0], x.shape[1]), dtype=np.float32)
     grid_full[:, feature_indices] = grid
 
     Z = [None] * len(grid_full)
@@ -487,7 +475,7 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=Non
     for i in range(len(grid_full)):
         Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
 
-    Z = np.array(Z)
+    Z = np.array(Z, dtype=np.int32)
     Z = Z.reshape(xx.shape)
 
     if ax is None:
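Note on the dtype changes in `plot_decision_boundary`: `grid_full` holds one row per meshgrid cell, so pinning it to `np.float32` (and the predicted labels to `np.int32`) halves the memory of the densest buffers. A quick check of the saving, with arbitrary illustrative sizes:

    import numpy as np

    n_cells, n_features = 200 * 200, 30  # illustrative sizes
    f64 = np.zeros((n_cells, n_features))                    # default float64: 8 bytes/value
    f32 = np.zeros((n_cells, n_features), dtype=np.float32)  # 4 bytes/value
    print(f64.nbytes // 2**20, 'MiB vs', f32.nbytes // 2**20, 'MiB')
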
@@ -513,11 +501,11 @@ def plot_decision_boundary(x, y, activation_potentiation, W, artist=None, ax=Non
 
     except:
 
-        art1_1 = ax[0].contourf(xx, yy, Z, alpha=0.8)
-        art1_2 = ax[0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
-        ax[0].set_xlabel(f'Feature {0 + 1}')
-        ax[0].set_ylabel(f'Feature {1 + 1}')
-        ax[0].set_title('Decision Boundary')
+        art1_1 = ax.contourf(xx, yy, Z, alpha=0.8)
+        art1_2 = ax.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+        ax.set_xlabel(f'Feature {0 + 1}')
+        ax.set_ylabel(f'Feature {1 + 1}')
+        ax.set_title('Decision Boundary')
 
 
     return art1_1, art1_2
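Note on the `ax[0]` → `ax` change: the `except` branch previously assumed `ax` was an array of Axes, so passing a single `Axes` made the fallback itself crash with a `TypeError`. The two call shapes, sketched:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()        # single Axes: `ax[0]` raises TypeError
    fig2, axs = plt.subplots(1, 2)  # array of Axes: `axs[0]` works
    # After the fix, the fallback path handles the single-Axes case too.
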
@@ -558,7 +546,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
             if y_preds[i] == cls:
                 class_points.append(X_pca[i])
 
-        class_points = np.array(class_points)
+        class_points = np.array(class_points, dtype=y.dtype)
 
 
         if len(class_points) > 2:
@@ -573,8 +561,8 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
     plt.draw()
 
-
-def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):
+
+def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):
 
     for j in range(len(class_count)):
 
 
@@ -601,15 +589,40 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=Fa
 
     return artist5
 
+def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=False, loss=False):
+
+    for j in range(class_count):
+
+        if acc != False and loss != False:
+            suptitle_info = ' Accuracy:' + str(acc) + '\n' + '\nNeurons Memory:'
+        else:
+            suptitle_info = 'Neurons Memory:'
+
+        mat = LTPW[j,:].reshape(row, col)
+
+        title_info = f'{j+1}. Neuron'
+
+        art5 = ax1[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+        ax1[j].set_aspect('equal')
+        ax1[j].set_xticks([])
+        ax1[j].set_yticks([])
+        ax1[j].set_title(title_info)
+
+
+        artist5.append([art5])
+
+    fig1.suptitle(suptitle_info, fontsize=16)
+
 
 def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
     """Initializes the visualization setup based on the parameters."""
     from .data_operations import find_closest_factors
     visualization_objects = {}
 
-    if show_training:
+    if show_training or neurons_history:
         if not val:
-            raise ValueError("For showing training, 'val' parameter must be True.")
+            raise ValueError("For showing training or neurons history, 'val' parameter must be True.")
 
         G = nx.Graph()
         fig, ax = plt.subplots(2, 2)
@@ -626,7 +639,7 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai
 
     if neurons_history:
         row, col = find_closest_factors(len(x_train[0]))
-        fig1, ax1 = plt.subplots(1, len(set(y_train)), figsize=(18, 14))
+        fig1, ax1 = plt.subplots(1, len(y_train[0]), figsize=(18, 14))
         visualization_objects.update({
             'fig1': fig1,
             'ax1': ax1,
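Note on `len(set(y_train))` → `len(y_train[0])`: the new code reads the panel count from the width of a one-hot label row. Calling `set()` on a 2-D NumPy array iterates over its rows, which are unhashable, so the old expression would raise `TypeError` for one-hot `y_train`; this is presumably what the change fixes. A tiny sketch with illustrative data:

    import numpy as np

    y_train = np.eye(3)[[0, 2, 1, 2]]  # one-hot labels, illustrative
    print(len(y_train[0]))             # 3: number of classes read from label width
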
@@ -637,6 +650,16 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai
 
     return visualization_objects
 
+
+
+def update_neural_web_for_fit(W, ax, G, artist):
+    """
+    Updates the neural web visualization during fitting and appends the new artists.
+    """
+    art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
+    art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+    artist.append(art5_list)
+
 
 def update_weight_visualization_for_fit(ax, LTPW, artist2):
     """Updates the weight visualization plot."""
@@ -673,11 +696,12 @@ def update_validation_history_for_fit(ax, val_list, artist3):
 def display_visualization_for_fit(fig, artist_list, interval):
     """Displays the animation for the given artist list."""
     ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
+    return ani
+
+def show():
     plt.tight_layout()
     plt.show()
 
-
-
 def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
     """Initialize all visualization components"""
     from .data_operations import find_closest_factors
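Note on `display_visualization_for_fit` now returning the `ArtistAnimation`: matplotlib holds only a weak reference to animations, so if the caller discards the return value the animation can be garbage-collected before it plays; the new module-level `show()` then finalizes layout and enters the event loop. A sketch of the intended calling pattern, assuming `fig` and `artist_list` come from the surrounding fit pipeline:

    # Keep a reference so the animation is not garbage-collected before display.
    ani = display_visualization_for_fit(fig, artist_list, interval=100)
    show()
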
@@ -765,7 +789,7 @@ def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc
     if 'neurons' in viz_objects:
         neurons = viz_objects['neurons']
         for _ in range(10):
-            neurons['artists'] = neuron_history(
+            neurons['artists'] = update_neuron_history_for_learner(
                 np.copy(best_weights),
                 neurons['ax'],
                 neurons['row'],