pyerualjetwork 4.1.5__py3-none-any.whl → 4.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,7 @@
  import networkx as nx
  import matplotlib.pyplot as plt
  import cupy as cp
+ import numpy as np
  from scipy.spatial import ConvexHull
  import seaborn as sns
  from matplotlib.animation import ArtistAnimation
@@ -31,7 +32,7 @@ def draw_neural_web(W, ax, G, return_objs=False):
  art1, art2, art3 = draw_neural_web(W, ax, G, return_objs=True)
  plt.show()
  """
-
+ W = W.get()
  for i in range(W.shape[0]):
  for j in range(W.shape[1]):
  if W[i, j] != 0:
@@ -40,6 +41,7 @@ def draw_neural_web(W, ax, G, return_objs=False):
  edges = G.edges(data=True)
  weights = [edata['ltpw'] for _, _, edata in edges]
  pos = {}
+
  num_motor_neurons = W.shape[0]
  num_sensory_neurons = W.shape[1]

@@ -68,128 +70,114 @@ def draw_neural_web(W, ax, G, return_objs=False):
  return art1, art2, art3


- def draw_model_architecture(model_name, model_path='', style='basic'):
+ def draw_model_architecture(model_name, model_path=''):
+ """
+ The `draw_model_architecture` function visualizes the architecture of a neural network model with
+ multiple inputs based on activation functions.
+
+ :param model_name: The `model_name` parameter in the `draw_model_architecture` function is used to
+ specify the name of the neural network model whose architecture you want to visualize. This function
+ visualizes the architecture of a neural network model with multiple inputs based on activation
+ functions
+ :param model_path: The `model_path` parameter in the `draw_model_architecture` function is used to
+ specify the path where the neural network model is saved. If the model is saved in a specific
+ directory or file location, you can provide that path as a string when calling the function. If the
+ model is saved
  """
- Visualizes the architecture of a neural network model.
-
- Parameters
- ----------
- model_name : str
- The name of the model to be visualized, which will be displayed in the title or label.
-
- model_path : str
- The file path to the model, from which the architecture is loaded. Default is ''
-
- style : str, optional
- The style of the visualization.
- Options:
- - 'basic': Displays a simplified view of the model architecture.
- - 'detailed': Shows a more comprehensive view, including layer details and parameters.
- Default is 'basic'.
-
- Returns
- -------
- None
- Draws and displays the architecture of the specified model.
-
-
- Examples
- --------
- >>> draw_model_architecture("MyModel", "path/to/model", style='detailed')
  """
- from .plan_cuda import get_scaler, get_act_pot, get_weights
- from .model_operations_cuda import load_model
+ Visualizes the architecture of a neural network model with multiple inputs based on activation functions.
+ """
+
+ from .model_operations_cuda import load_model, get_scaler, get_act_pot, get_weights

  model = load_model(model_name=model_name, model_path=model_path)

  W = model[get_weights()]
  activation_potentiation = model[get_act_pot()]
  scaler_params = model[get_scaler()]
-
- text_1 = f"Input Shape:\n{W.shape[1]}"
- text_2 = f"Output Shape:\n{W.shape[0]}"
-
- if scaler_params is None:
- bottom_left_text = 'Standard Scaler=No'
- else:
- bottom_left_text = 'Standard Scaler=Yes'
-
- if len(activation_potentiation) != 1 or (len(activation_potentiation) == 1 and activation_potentiation[0] != 'linear'):
-
- bottom_left_text_1 = f'Aggregation Layers(Aggregates All Conversions)={len(activation_potentiation)}'
-
- else:
-
- bottom_left_text_1 = 'Aggregation Layers(Aggregates All Conversions)=0'
-
- bottom_left_text_2 = 'Potentiation Layer(Fully Connected)=1'
-
- if scaler_params is None:
- bottom_left_text = 'Standard Scaler=No'
- else:
- bottom_left_text = 'Standard Scaler=Yes'
-
- num_middle_axes = len(activation_potentiation)
-
- if style == 'detailed':
-
- col = 1
-
- elif style == 'basic':

- col = 2
+ # Calculate dimensions based on number of activation functions
+ num_activations = len(activation_potentiation)
+ input_groups = num_activations # Number of input groups equals number of activations
+ num_inputs = W.shape[1]

- fig, axes = plt.subplots(1, num_middle_axes + col, figsize=(5 * (num_middle_axes + 2), 5))
-
- fig.suptitle("Model Architecture", fontsize=16, fontweight='bold')
-
- for i, activation in enumerate(activation_potentiation):
- x = cp.linspace(-100, 100, 100)
- translated_x_train = draw_activations(x, activation)
- y = translated_x_train
-
- axes[i].plot(x, y, color='b', markersize=6, linewidth=2, label='Activations Over Depth')
- axes[i].set_title(activation_potentiation[i])
-
- axes[i].spines['top'].set_visible(False)
- axes[i].spines['right'].set_visible(False)
- axes[i].spines['left'].set_visible(False)
- axes[i].spines['bottom'].set_visible(False)
- axes[i].get_xaxis().set_visible(False)
- axes[i].get_yaxis().set_visible(False)
+ # Create figure
+ fig = plt.figure(figsize=(15, 10))
+
+ # Calculate positions for nodes
+ def get_node_positions():
+ positions = {}

-
- if i < num_middle_axes - 1:
- axes[i].annotate('', xy=(1.05, 0.5), xytext=(0.95, 0.5),
- xycoords='axes fraction', textcoords='axes fraction',
- arrowprops=dict(arrowstyle="->", color='black', lw=1.5))
+ # Input layer positions
+ total_height = 0.8 # Maximum vertical space
+ group_height = total_height / input_groups # Vertical space allocated to each group
+ input_spacing = min(group_height / (num_inputs + 1), 0.1) # Spacing between the inputs
+
+ for group in range(input_groups):
+ group_start_y = 0.9 - (group * group_height) # Starting y coordinate of the group
+ for i in range(num_inputs):
+ y_pos = group_start_y - ((i + 1) * input_spacing)
+ positions[f'input_{group}_{i}'] = (0.2, y_pos)
+
+ # Aggregation layer positions
+ agg_spacing = total_height / (num_inputs + 1)
+ for i in range(num_inputs):
+ positions[f'summed_{i}'] = (0.5, 0.9 - ((i + 1) * agg_spacing))
+
+ # Output layer positions
+ output_spacing = total_height / (W.shape[0] + 1)
+ for i in range(W.shape[0]):
+ positions[f'output_{i}'] = (0.8, 0.9 - ((i + 1) * output_spacing))
+
+ return positions
+
+ # Draw the network
+ pos = get_node_positions()
+
+ # Draw nodes
+ for group in range(input_groups):
+ # Draw input nodes
+ for i in range(num_inputs):
+ plt.plot(*pos[f'input_{group}_{i}'], 'o', color='lightgreen', markersize=20)
+ plt.text(pos[f'input_{group}_{i}'][0] - 0.05, pos[f'input_{group}_{i}'][1],
+ f'Input #{i+1} ({activation_potentiation[group]})', ha='right', va='center')
+
+ # Draw connections from input to summed input directly
+ plt.plot([pos[f'input_{group}_{i}'][0], pos[f'summed_{i}'][0]],
+ [pos[f'input_{group}_{i}'][1], pos[f'summed_{i}'][1]], 'k-')
+ # Draw aggregation nodes
+ if group == 0:
+ plt.plot(*pos[f'summed_{i}'], 'o', color='lightgreen', markersize=20)
+ plt.text(pos[f'summed_{i}'][0], pos[f'summed_{i}'][1] + 0.02,
+ f'Summed\nInput #{i+1}', ha='center', va='bottom')
+
+ # Draw output nodes and connections
+ for i in range(W.shape[0]):
+ plt.plot(*pos[f'output_{i}'], 'o', color='gold', markersize=20)
+ plt.text(pos[f'output_{i}'][0] + 0.05, pos[f'output_{i}'][1],
+ f'Output #{i+1}', ha='left', va='center', color='purple')
+
+ # Connect all aggregation nodes to each output
+ for group in range(num_inputs):
+ plt.plot([pos[f'summed_{group}'][0], pos[f'output_{i}'][0]],
+ [pos[f'summed_{group}'][1], pos[f'output_{i}'][1]], 'k-')

- if style == 'detailed':
+ # Add labels and annotations
+ plt.text(0.2, 0.95, 'Input Layer', ha='center', va='bottom', fontsize=12)
+ plt.text(0.5, 0.95, 'Aggregation\nLayer', ha='center', va='bottom', fontsize=12)
+ plt.text(0.8, 0.95, 'Output Layer', ha='center', va='bottom', fontsize=12)

- G = nx.Graph()
- draw_neural_web(W=W, ax=axes[num_middle_axes], G=G)
-
- elif style == 'basic':
-
- circle1 = plt.Circle((0.5, 0.5), 0.4, color='skyblue', ec='black', lw=1.5)
- axes[num_middle_axes].add_patch(circle1)
- axes[num_middle_axes].text(0.5, 0.5, text_1, ha='center', va='center', fontsize=12)
- axes[num_middle_axes].set_xlim(0, 1)
- axes[num_middle_axes].set_ylim(0, 1)
- axes[num_middle_axes].axis('off')
-
- circle2 = plt.Circle((0.5, 0.5), 0.4, color='lightcoral', ec='black', lw=1.5)
- axes[-1].add_patch(circle2)
- axes[-1].text(0.5, 0.5, text_2, ha='center', va='center', fontsize=12)
- axes[-1].set_xlim(0, 1)
- axes[-1].set_ylim(0, 1)
- axes[-1].axis('off')
-
-
- fig.text(0.01, 0, bottom_left_text, ha='left', va='bottom', fontsize=10)
- fig.text(0.01, 0.04, bottom_left_text_1, ha='left', va='bottom', fontsize=10)
- fig.text(0.01, 0.08, bottom_left_text_2, ha='left', va='bottom', fontsize=10)
+ # Remove axes
+ plt.axis('off')
+
+ # Add model information
+ if scaler_params is None:
+ plt.text(0.95, 0.05, 'Standard Scaler=No', fontsize=10, ha='right', va='bottom')
+ else:
+ plt.text(0.95, 0.05, 'Standard Scaler=Yes', fontsize=10, ha='right', va='bottom')

+ # Add model architecture title
+ plt.text(0.95, 0.1, f"PLAN Model Architecture: {model_name}", fontsize=12, ha='right', va='bottom', fontweight='bold')
  plt.tight_layout()
  plt.show()

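The rewritten draw_model_architecture above no longer accepts a style argument, so calls that passed style='basic' or style='detailed' will now raise a TypeError for an unexpected keyword argument. A minimal usage sketch (the model name and path are placeholders for a model previously saved with the library, not files shipped in the wheel):

    from pyerualjetwork import visualizations_cuda as viz

    # hypothetical model saved earlier with the library's model-saving utilities
    viz.draw_model_architecture(model_name="MyModel", model_path="path/to/model/")
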
@@ -348,8 +336,8 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
  acc = acc_list[len(acc_list) - 1]
  y_true = decode_one_hot(y_test)

- y_true = cp.array(y_true, copy=False)
- y_preds = cp.array(y_preds, copy=False)
+ y_true = cp.array(y_true, copy=True)
+ y_preds = cp.array(y_preds, copy=True)
  Class = cp.unique(decode_one_hot(y_test))

  precision, recall, f1 = metrics(y_test, y_preds)
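The copy=False to copy=True change above, together with the .get() calls added elsewhere in this file, follows the standard CuPy interop pattern: plotting and metric code running on the host needs NumPy data, so device arrays are copied and explicitly transferred before use. A minimal sketch, assuming a CUDA-capable machine with cupy installed (the values are made up):

    import cupy as cp
    import numpy as np

    y_preds = cp.asarray([0, 2, 1, 1])         # device array, e.g. decoded predictions
    y_preds = cp.array(y_preds, copy=True)     # independent copy rather than a view
    host_preds = y_preds.get()                 # explicit device-to-host transfer
    assert isinstance(host_preds, np.ndarray)  # matplotlib/seaborn expect host arrays
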
@@ -568,20 +556,20 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
  plt.draw()


- def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):
+ def update_neuron_history(LTPW, ax1, row, col, class_count, artist5, fig1, acc=False, loss=False):

- for j in range(len(class_count)):
+ for j in range(class_count):

  if acc != False and loss != False:
- suptitle_info = data + ' Accuracy:' + str(acc) + '\n' + data + ' Loss:' + str(loss) + '\nNeurons Memory:'
+ suptitle_info = ' Accuracy:' + str(acc) + '\n' + '\nNeurons Memory:'
  else:
  suptitle_info = 'Neurons Memory:'

- mat = LTPW[j,:].reshape(row, col)
+ mat = LTPW[j,:].reshape(row, col).get()

  title_info = f'{j+1}. Neuron'

- art5 = ax1[j].imshow(mat.get(), interpolation='sinc', cmap='viridis')
+ art5 = ax1[j].imshow(mat, interpolation='sinc', cmap='viridis')

  ax1[j].set_aspect('equal')
  ax1[j].set_xticks([])
@@ -593,17 +581,15 @@ def neuron_history(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=Fa

  fig1.suptitle(suptitle_info, fontsize=16)

- return artist5
-

  def initialize_visualization_for_fit(val, show_training, neurons_history, x_train, y_train):
  """Initializes the visualization setup based on the parameters."""
  from .data_operations_cuda import find_closest_factors
  visualization_objects = {}

- if show_training:
+ if show_training or neurons_history:
  if not val:
- raise ValueError("For showing training, 'val' parameter must be True.")
+ raise ValueError("For showing training or neurons history, 'val' parameter must be True.")

  G = nx.Graph()
  fig, ax = plt.subplots(2, 2)
@@ -620,7 +606,7 @@ def initialize_visualization_for_fit(val, show_training, neurons_history, x_trai

  if neurons_history:
  row, col = find_closest_factors(len(x_train[0]))
- fig1, ax1 = plt.subplots(1, len(set(y_train)), figsize=(18, 14))
+ fig1, ax1 = plt.subplots(1, len(y_train[0]), figsize=(18, 14))
  visualization_objects.update({
  'fig1': fig1,
  'ax1': ax1,
@@ -637,7 +623,18 @@ def update_weight_visualization_for_fit(ax, LTPW, artist2):
  art2 = ax.imshow(LTPW.get(), interpolation='sinc', cmap='viridis')
  artist2.append([art2])

+ def show():
+ plt.tight_layout()
+ plt.show()

+ def update_neural_web_for_fit(W, ax, G, artist):
+ """
+ The function `update_neural_web_for_fit` updates a neural web visualization for fitting.
+ """
+ art5_1, art5_2, art5_3 = draw_neural_web(W=W, ax=ax, G=G, return_objs=True)
+ art5_list = [art5_1] + [art5_2] + list(art5_3.values())
+ artist.append(art5_list)
+
  def update_decision_boundary_for_fit(ax, x_val, y_val, activation_potentiation, LTPW, artist1):
  """Updates the decision boundary visualization."""
  art1_1, art1_2 = plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW, artist=artist1, ax=ax)
@@ -646,10 +643,13 @@ def update_decision_boundary_for_fit(ax, x_val, y_val, activation_potentiation,

  def update_validation_history_for_fit(ax, val_list, artist3):
  """Updates the validation accuracy history plot."""
- period = list(range(1, len(val_list) + 1))
+ val_list_cpu = []
+ for i in range(len(val_list)):
+ val_list_cpu.append(val_list[i].get())
+ period = list(range(1, len(val_list_cpu) + 1))
  art3 = ax.plot(
  period,
- val_list,
+ val_list_cpu,
  linestyle='--',
  color='g',
  marker='o',
@@ -667,10 +667,34 @@ def update_validation_history_for_fit(ax, val_list, artist3):
  def display_visualization_for_fit(fig, artist_list, interval):
  """Displays the animation for the given artist list."""
  ani = ArtistAnimation(fig, artist_list, interval=interval, blit=True)
- plt.tight_layout()
- plt.show()
+ return ani

+ def update_neuron_history_for_learner(LTPW, ax1, row, col, class_count, artist5, data, fig1, acc=False, loss=False):

+ for j in range(len(class_count)):
+
+ if acc != False and loss != False:
+ suptitle_info = data + ' Accuracy:' + str(acc) + '\n' + data + ' Loss:' + str(loss) + '\nNeurons Memory:'
+ else:
+ suptitle_info = 'Neurons Memory:'
+
+ mat = LTPW[j,:].reshape(row, col)
+
+ title_info = f'{j+1}. Neuron'
+
+ art5 = ax1[j].imshow(mat.get(), interpolation='sinc', cmap='viridis')
+
+ ax1[j].set_aspect('equal')
+ ax1[j].set_xticks([])
+ ax1[j].set_yticks([])
+ ax1[j].set_title(title_info)
+
+
+ artist5.append([art5])
+
+ fig1.suptitle(suptitle_info, fontsize=16)
+
+ return artist5

  def initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train):
  """Initialize all visualization components"""
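display_visualization_for_fit now returns the ArtistAnimation instead of showing it, so the caller has to keep a reference to the returned object; matplotlib animations are driven by a timer that stops if the animation object is garbage-collected. A minimal sketch of that pattern, with made-up frame data:

    import matplotlib.pyplot as plt
    from matplotlib.animation import ArtistAnimation

    fig, ax = plt.subplots()
    frames = [[ax.plot([0, 1], [0, v], color='g')[0]] for v in range(1, 4)]  # toy artists
    ani = ArtistAnimation(fig, frames, interval=200, blit=True)  # keep 'ani' alive until shown
    plt.show()
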
@@ -720,13 +744,19 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
  return

  hist = viz_objects['history']
-
+ for i in range(len(loss_list)):
+ loss_list[i] = loss_list[i].get()
+
  # Loss plot
- art1 = hist['ax'][0].plot(depth_list.get(), loss_list.get(), color='r', markersize=6, linewidth=2)
+ art1 = hist['ax'][0].plot(depth_list, loss_list, color='r', markersize=6, linewidth=2)
  hist['ax'][0].set_title('Test Loss Over Depth')
  hist['artist1'].append(art1)

  # Accuracy plot
+
+ for i in range(len(best_acc_per_depth_list)):
+ best_acc_per_depth_list[i] = best_acc_per_depth_list[i].get()
+
  art2 = hist['ax'][1].plot(depth_list, best_acc_per_depth_list, color='g', markersize=6, linewidth=2)
  hist['ax'][1].set_title('Test Accuracy Over Depth')
  hist['artist2'].append(art2)
@@ -737,7 +767,7 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
  for activation in final_activations:
  translated_x_train += draw_activations(x, activation)

- art3 = hist['ax'][2].plot(x, translated_x_train, color='b', markersize=6, linewidth=2)
+ art3 = hist['ax'][2].plot(x.get(), translated_x_train.get(), color='b', markersize=6, linewidth=2)
  hist['ax'][2].set_title('Potentiation Shape Over Depth')
  hist['artist3'].append(art3)

@@ -759,8 +789,8 @@ def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc
  if 'neurons' in viz_objects:
  neurons = viz_objects['neurons']
  for _ in range(10):
- neurons['artists'] = neuron_history(
- cp.copy(best_weights),
+ neurons['artists'] = update_neuron_history_for_learner(
+ cp.copy(best_weights),
  neurons['ax'],
  neurons['row'],
  neurons['col'],
@@ -1,16 +1,21 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.1.5
+ Version: 4.1.6
  Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
  Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
  Description-Content-Type: text/markdown

- # PyerualJetwork [![Socket Badge](https://socket.dev/api/badge/pypi/package/pyerualjetwork/4.0.6?artifact_id=tar-gz)](https://socket.dev/pypi/package/pyerualjetwork/overview/4.0.6/tar-gz) [![CodeFactor](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork/badge)](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan)](https://pepy.tech/projects/anaplan) + [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork)](https://pepy.tech/projects/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork/month)](https://pepy.tech/projects/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork/week)](https://pepy.tech/projects/pyerualjetwork) [![PyPI version](https://img.shields.io/pypi/v/pyerualjetwork.svg)](https://pypi.org/project/pyerualjetwork/)
+ # PyerualJetwork [![Socket Badge](https://socket.dev/api/badge/pypi/package/pyerualjetwork/4.0.6?artifact_id=tar-gz)](https://socket.dev/pypi/package/pyerualjetwork/overview/4.0.6/tar-gz) [![CodeFactor](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork/badge)](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork)](https://pepy.tech/projects/pyerualjetwork) + [![PyPI Downloads](https://static.pepy.tech/badge/anaplan)](https://pepy.tech/projects/anaplan)
+
+
+ [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork/month)](https://pepy.tech/projects/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork/week)](https://pepy.tech/projects/pyerualjetwork) [![PyPI version](https://img.shields.io/pypi/v/pyerualjetwork.svg)](https://pypi.org/project/pyerualjetwork/)

  Note: anaplan old name of pyerualjetwork

+ ![PyerualJetwork](https://github.com/HCB06/PyerualJetwork/blob/main/Media/pyerualjetwork_with_name.png)<br><br><br>
+
  Libraries.io Page: https://libraries.io/pypi/pyerualjetwork

  PyPi Page: https://pypi.org/project/pyerualjetwork/
@@ -33,7 +38,7 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
  Optimized for Visual Studio Code

  requires=[
- 'setuptools==75.6.0'
+ 'setuptools==75.6.0',
  'scipy==1.13.1',
  'tqdm==4.66.4',
  'seaborn==0.13.2',
@@ -42,18 +47,19 @@ GitHub Page: https://github.com/HCB06/PyerualJetwork
  'numpy==1.26.4',
  'matplotlib==3.9.0',
  'colorama==0.4.6',
- 'cupy-cuda12x'
+ 'cupy-cuda12x',
+ 'psutil==6.1.1'
  ]

  matplotlib, seaborn, networkx (optional).
- PyerualJetwork checks and install all dependencies (with optional ones but except cupy) for every runing.
+ PyerualJetwork checks and installs all dependencies, including the optional ones, on every run.
  If your version is higher or lower, PyerualJetwork automaticly delete other versions and installs this versions.

  ##############################

  ABOUT PYERUALJETWORK:

- PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques. Its most important component is the PLAN (Potentiation Learning Artificial Neural Network) https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4862342. (THIS ARTICLE IS FIRST VERSION OF PLAN.) MODERN VERSION OF PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+ PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques with optimized GPU acceleration. Its most important component is the PLAN (Potentiation Learning Artificial Neural Network) https://papers.ssrn.com/sol3/papers.cfm?abstract_id=4862342. (THIS ARTICLE IS FIRST VERSION OF PLAN.) MODERN VERSION OF PLAN: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
  Both the PLAN algorithm and the PyerualJetwork library were created by Author, and all rights are reserved by Author.
  PyerualJetwork is free to use for commercial business and individual users. PyerualJetwork is written in fully functional programming with non-oop elements. PyerualJetwork consists of many functions that complement each other, which facilitates the learning process and debugging during use.
  As of 12/21/2024, the library includes PLAN and PLANEAT module, but other machine learning modules are expected to be added in the future.
@@ -66,8 +72,6 @@ fit function only fits given training data(suitable for dynamic graph) but learn

  PyerualJetworket includes Plan Vision, NLPlan, PLANEAT and at the between of both, Deep Plan.<br>

- ![PyerualJetwork](https://github.com/HCB06/PyerualJetwork/blob/main/Media/anaplanet_logo_final.png)<br><br><br>
-
  PLAN VISION:<br>

  ![PLAN VISION](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PlanVision.jpg)
@@ -91,8 +95,20 @@ PLANEAT:<br>
  You can create artificial intelligence models that perform reinforcement learning tasks and genetic optimization tasks using the planeat module:

  ![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_1.gif)<br>
-
  ![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/PLANEAT_2.gif)<br>
+ ![PLANEAT](https://github.com/HCB06/PyerualJetwork/blob/main/Media/mario.gif)<br><br>
+
+ YOU CAN CREATE DYNAMIC ANIMATIONS OF YOUR MODELS
+
+ ![VISUALIZATIONS](https://github.com/HCB06/PyerualJetwork/blob/main/Media/fit_history.gif)<br>
+ ![VISUALIZATIONS](https://github.com/HCB06/PyerualJetwork/blob/main/Media/neuron_history.gif)<br>
+ ![VISUALIZATIONS](https://github.com/HCB06/PyerualJetwork/blob/main/Media/neural_web.gif)<br>
+
+ YOU CAN CREATE AND VISUALIZE YOUR MODEL ARCHITECTURE
+
+ ![VISUALIZATIONS](https://github.com/HCB06/PyerualJetwork/blob/main/Media/model_arc.png)<br>
+ ![VISUALIZATIONS](https://github.com/HCB06/PyerualJetwork/blob/main/Media/eval_metrics.png)<br>
+


  HOW DO I IMPORT IT TO MY PROJECT?
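The README's installation steps are not part of this hunk; for orientation, the usual route is pip plus a module import, sketched below (module names taken from the wheel's RECORD; the README's own instructions may differ):

    # pip install pyerualjetwork==4.1.6
    from pyerualjetwork import plan        # CPU implementation
    from pyerualjetwork import plan_cuda   # CUDA implementation, needs cupy-cuda12x
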
@@ -0,0 +1,24 @@
+ pyerualjetwork/__init__.py,sha256=47FoEu3nH5W85OrbFZXsfegly9PKg-oHIAk82AvVCKE,2450
+ pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
+ pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
+ pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
+ pyerualjetwork/data_operations_cuda.py,sha256=IrLQkyf5FNNy4kfFcYDToueRnMDdXk7W4ufzpgwxA4k,17267
+ pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
+ pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
+ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
+ pyerualjetwork/memory_operations.py,sha256=g_DU1g_Xx8BXZ253CV_DvhHI65cXaLNT4iBhlPuPN_w,13487
+ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+ pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
+ pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
+ pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
+ pyerualjetwork/plan.py,sha256=ZadbCULBnfd8yrE21-shzifnILzQPZ9jEy6amQxuuvw,35251
+ pyerualjetwork/plan_cuda.py,sha256=y1YoZQCSXGyLduG-IdcSPk2DPMAYG5G2pOfDefRZw0w,36287
+ pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
+ pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
+ pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
+ pyerualjetwork/visualizations.py,sha256=9naPYMQKpkMcP_GEaBK90FEZAlImT_f-lgRqVCwvcb8,28660
+ pyerualjetwork/visualizations_cuda.py,sha256=blOM-VQnAT_qzM3i_OWjL5C1qnUtYctEvja-a_X4Z0w,29085
+ pyerualjetwork-4.1.6.dist-info/METADATA,sha256=xRiAQOkHwFGtNVJDRHGgGS6KbFbWm8B3C2dI-dP8GUM,7793
+ pyerualjetwork-4.1.6.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.1.6.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.1.6.dist-info/RECORD,,
@@ -1,24 +0,0 @@
- pyerualjetwork/__init__.py,sha256=r2aXRTfYuAEBNIxvSqtamyIilgdZd-SwHfhtoDNQ8VI,2533
- pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
- pyerualjetwork/activation_functions_cuda.py,sha256=7U69VfwAIE8STUng2zEwPPQES9NgnkAXsDtVh-EzaZE,11803
- pyerualjetwork/data_operations.py,sha256=2julEScuHsL_ueeJ-JE3hiqw3wibZQW_L2bwwdoXTN0,16552
- pyerualjetwork/data_operations_cuda.py,sha256=fokG05P61J2bBMkkEh6jFMrsl6WqM-Twcy7nA5-2Fg0,18779
- pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
- pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
- pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
- pyerualjetwork/memory_operations.py,sha256=D0LasqfnUkz1zOiGs68VwDrfW3xOELAhJRc1WxH7Zsw,8785
- pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
- pyerualjetwork/metrics_cuda.py,sha256=Hz4PCeE5GcVUllZdsgXXdIw-UNqUVpqNxMIlPBNTSKY,5069
- pyerualjetwork/model_operations.py,sha256=eWYiYlXYZzsRgVfF-4CFvjCHaZOGB2378evre8yCzYk,13084
- pyerualjetwork/model_operations_cuda.py,sha256=1082RJ-b8PS9g3VV8NIE0E7MepkMSJzC6uJWbcrHcWw,13407
- pyerualjetwork/plan.py,sha256=MNXCFZ7zaIsdveKKopJL1DGQh1MGxwrCat0_r0S6hbo,34346
- pyerualjetwork/plan_cuda.py,sha256=uMJh-mmkmvDFw5jKOJvlRPRn_w3ybLD2WE6at4Okigs,33976
- pyerualjetwork/planeat.py,sha256=6uEcCF4bV1_W1aQUTKQjfnDgWp6rP2oluKFo5Y37k7o,39517
- pyerualjetwork/planeat_cuda.py,sha256=GXYt_00rDKkDKJrhjE8hHOtu4U_pQZM1yZ6XrMpQo2c,39574
- pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
- pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
- pyerualjetwork/visualizations_cuda.py,sha256=LkVWyI9ihSl0lGMEr82DjTfNQPfVB2QNObKnJI32QKA,26783
- pyerualjetwork-4.1.5.dist-info/METADATA,sha256=A2eSYvdlJB1poeQl5uIzMgCNpX5zfgjmpxfAs3hWr6E,7066
- pyerualjetwork-4.1.5.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.1.5.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.1.5.dist-info/RECORD,,