pyerualjetwork 3.2.0__py3-none-any.whl → 3.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -18,43 +18,49 @@ from scipy.spatial import ConvexHull
  from datetime import datetime
  from scipy import io
  import scipy.io as sio
+ from matplotlib.animation import ArtistAnimation
+ import networkx as nx

  # BUILD -----


  def fit(
  x_train: List[Union[int, float]],
- y_train: List[Union[int, float]], # At least two.. and one hot encoded
+ y_train: List[Union[int, float]], # One hot encoded
  val= None,
  val_count = None,
  activation_potentiation=[None], # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
  x_val= None,
  y_val= None,
  show_training = None,
- show_count= None, # [DISABLED]
- visible_layer=None # For future [DISABLED]
+ visible_layer=None, # For the future [DISABLED]
+ interval=100,
+ LTD = 0 # LONG TERM DEPRESSION
  ) -> str:

  infoPLAN = """
  Creates and configures a PLAN model.

  Args:
- x_train (list[num]): List of input data.
- y_train (list[num]): List of target labels. (one hot encoded)
- val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
- val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 10 (%10) (optional)
- activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
- x_val (list[num]): List of validation data. (optional) Default: %10 of x_train (auto_balanced) it means every %1 of train progress starts validation
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: %10 of y_train (auto_balanced) it means every %1 of train progress starts validation
- show_training (bool, str): True, None or'final'
- show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional) [DISABLED]
- visible_layer: For future [DISABLED]
+ x_train (list[num]): List or numpy array of input data.
+ y_train (list[num]): List or numpy array of target labels. (one hot encoded)
+ val (None or True): Run validation during training? default: None (optional)
+ val_count (None or int): After how many learned examples should an accuracy test be performed? default: 10, i.e. roughly every 10 steps (optional)
+ activation_potentiation (list): Activation function parameters for deeper PLAN networks. For details, run: help(plan.activation_functions_list). default: [None] (optional)
+ x_val (list[num]): List of validation data. default: x_train (optional)
+ y_val (list[num]): List of validation target labels. (one hot encoded) default: y_train (optional)
+ show_training (bool): True or None. default: None (optional)
+ visible_layer: For the future [DISABLED]
+ LTD (int): Long Term Depression hyperparameter for training the PLAN neural network. default: 0 (optional)
+ interval (float, int): Frame delay in milliseconds for the Training Report (show_training=True). This affects report performance; lower values are harder on low-end PCs (33.33 = 30 FPS, 16.67 = 60 FPS). default: 100 (optional)

  Returns:
- list([num]): (Weight matrices list, train_predictions list, Train_acc).
+ list([num]): (Weight matrix).
  error handled ?: Process status ('e')
  """

+ fit.__doc__ = infoPLAN
+
  visible_layer = None

  if len(x_train) != len(y_train):
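A minimal usage sketch of the updated signature (not part of the diff): the dataset and import style below are illustrative assumptions; only the parameter names and defaults come from the source above.

import numpy as np
from plan import plan   # assuming the package exposes fit() this way

x_train = np.random.rand(40, 4)                   # 40 samples, 4 features (illustrative)
y_train = np.eye(2)[np.random.randint(0, 2, 40)]  # one-hot encoded labels, as fit() expects

W = plan.fit(
    x_train, y_train,
    val=True,            # show_training=True now requires val=True (see ERROR115 below)
    show_training=True,  # opens the four-panel 'Train Report' figure
    interval=33.33,      # ~30 FPS frame delay for the report animation
    LTD=1,               # one long-term-depression pass per training sample
)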
@@ -62,43 +68,46 @@ def fit(
  print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
  return 'e'

- if val == True or val == 'final':
+ if val == True:

  try:

  if x_val == None and y_val == None:

- x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
+ x_val = x_train
+ y_val = y_train
+
+ except:

- x_train, y_train = auto_balancer(x_train, y_train)
- x_val, y_val = auto_balancer(x_val, y_val)
+ pass

- except:
- pass
+ if val_count == None:

- if val == True:
+ val_count = 10
+
+ val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
+ v_iter = 0
+ val_list = [] * val_count

- if val_count == None:
+ if show_training == True:

- val_count = 10
+ G = nx.Graph()

- v_iter = 0
+ fig, ax = plt.subplots(2, 2)
+ fig.suptitle('Train Report')

- if val == 'final':
+ artist1 = []
+ artist2 = []
+ artist3 = []
+ artist4 = []

- val_count = 100
-
- val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
- val_list = [] * val_count
+ if val != True:

- if show_count == None:
-
- show_count = 10
+ print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
+ infoPLAN + Style.RESET_ALL)
+ return 'e'

- if show_training == True or show_training == 'final':

- row, col = shape_control(x_train)
-
  class_count = set()

  for sublist in y_train:
@@ -121,10 +130,12 @@ def fit(
  x_train_size = len(x_train[0])

  if visible_layer == None:
+
  STPW = [None]
  STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT

  else:
+
  if visible_layer == 1:
  fex_count = visible_layer
  else:
@@ -133,6 +144,7 @@ def fit(
  fex_neurons = [None] * fex_count

  for i in range(fex_count):
+
  fex_neurons[i] = [x_train_size]

  cat_neurons = [len(class_count), x_train_size]
@@ -165,7 +177,7 @@ def fit(
  for Lindex, Layer in enumerate(STPW):


- STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w)
+ STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w, LTD=LTD)


  for i in range(len(STPW)):
@@ -173,27 +185,68 @@ def fit(

  for i, w in enumerate(STPW):
  LTPW[i] = LTPW[i] + w
-

  if val == True:

- try:
-
- if round(progress) % val_count == 1:
-
+ if int(progress) % val_count == 1:

  validation_model = evaluate(x_val, y_val, LTPW ,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
-
  val_acc = validation_model[get_acc()]
-
- plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)

- plt.pause(0.0001)
-
- plt.clf()
-
  val_list.append(val_acc)

+ if show_training == True:
+
+
+ mat = LTPW[0]
+ art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
+ suptitle_info = 'Weight Learning Progress'
+
+ ax[0, 0].set_title(suptitle_info)
+
+ artist2.append([art2])
+
+ artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1)
+
+ period = list(range(1, len(val_list) + 1))
+
+ art3 = ax[1, 1].plot(
+ period,
+ val_list,
+ linestyle='--',
+ color='g',
+ marker='o',
+ markersize=6,
+ linewidth=2,
+ label='Validation Accuracy'
+ )
+
+ ax[1, 1].set_title('Validation History')
+ ax[1, 1].set_xlabel('Time')
+ ax[1, 1].set_ylabel('Validation Accuracy')
+ ax[1, 1].set_ylim([0, 1])
+
+ artist3.append(art3)
+
+ for i in range(LTPW[0].shape[0]):
+ for j in range(LTPW[0].shape[1]):
+ if LTPW[0][i, j] != 0:
+ G.add_edge(f'Motor Neuron{i}', f'Sensory Neuron{j}', ltpw=LTPW[0][i, j])
+
+ edges = G.edges(data=True)
+ weights = [edata['ltpw'] for _, _, edata in edges]
+ pos = generate_fixed_positions(G, layout_type='circular')
+
+ art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
+ art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
+ art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
+ ax[0, 1].set_title('Neural Web')
+
+ art4_list = [art4_1] + [art4_2] + list(art4_3.values())
+
+ artist4.append(art4_list)
+
+
  if v_iter == 0:

  val_bar.update(val_acc)
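The new 'Neural Web' panel above is drawn with networkx. A self-contained sketch of the same pattern, assuming only that LTPW[0] is a (classes x features) weight matrix; the random matrix is a stand-in:

import networkx as nx
import numpy as np
import matplotlib.pyplot as plt

W0 = np.random.rand(3, 5)        # stand-in for LTPW[0]
G = nx.Graph()
for i in range(W0.shape[0]):
    for j in range(W0.shape[1]):
        if W0[i, j] != 0:        # one edge per nonzero weight
            G.add_edge(f'Motor Neuron{i}', f'Sensory Neuron{j}', ltpw=W0[i, j])

weights = [edata['ltpw'] for _, _, edata in G.edges(data=True)]
pos = nx.circular_layout(G)      # the diff uses its own generate_fixed_positions() instead

fig, ax = plt.subplots()
nx.draw_networkx_nodes(G, pos, ax=ax, node_size=1000, node_color='lightblue')
nx.draw_networkx_edges(G, pos, ax=ax, edge_color=weights, edge_cmap=plt.cm.Blues)
nx.draw_networkx_labels(G, pos, ax=ax, font_size=10, font_weight='bold')
ax.set_title('Neural Web')
plt.show()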
@@ -204,133 +257,92 @@ def fit(
  val_bar.update(val_acc)

  v_iter += 1
- except:
- pass
-
- if show_training == True:
- if index == 0:
- if row != 0:
-
- ax = plt.subplots(1, len(class_count), figsize=(18, 14))
-
- if round(progress) % 2 == 1:
-
- for j in range(len(class_count)):
-
-
- if row != 0:
-
- mat = LTPW[0][j,:].reshape(row, col)
- suptitle_info = 'Neurons Learning Progress: % '
- title_info = f'{j+1}. Neuron'
-
- mat = LTPW[0][j,:].reshape(row, col)
-
- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(title_info)
-
- else:
-
-
- mat = LTPW[0]
- plt.imshow(mat, interpolation='sinc', cmap='viridis')
- suptitle_info = 'Weight Learning Progress: % '
- title_info = 'Weight Matrix Of Fex Layer'
-
- progress_status = f"{progress:.1f}"
- plt.title(suptitle_info + progress_status)
- plt.draw()
- plt.pause(0.0001)
- plt.clf()
-
+
  if visible_layer == None:
  STPW = [None]
  STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT

  else:
  STPW = weight_identification(
- len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)
+ len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)

  train_progress.update(1)

- if show_training == 'final':
-
- fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+ if show_training == True:

- for j in range(len(class_count)):
+ mat = LTPW[0]

- mat = LTPW[0][j,:].reshape(row, col)
+ for i in range(30):

- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(f'{j+1}. Neuron')
+ art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
+ suptitle_info = 'Weight Learning Progress:'

- progress_status = f"{progress:.1f}"
- fig.suptitle('Neurons Learning Progress: % ' + progress_status)
- plt.draw()
- plt.pause(0.0001)
+ ax[0, 0].set_title(suptitle_info)

- if val == 'final':
-
- validation_model = evaluate(x_val, y_val, LTPW,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
+ artist2.append([art2])
+
+ art3 = ax[1, 1].plot(
+ period,
+ val_list,
+ linestyle='--',
+ color='g',
+ marker='o',
+ markersize=6,
+ linewidth=2,
+ label='Validation Accuracy'
+ )

- val_acc = validation_model[get_acc()]
-
- plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)
+ ax[1, 1].set_title('Validation History')
+ ax[1, 1].set_xlabel('Time')
+ ax[1, 1].set_ylabel('Validation Accuracy')
+ ax[1, 1].set_ylim([0, 1])

- plt.show()
+ artist3.append(art3)

- val_list.append(val_acc)
-
- val_bar.update(val_acc)
-
- for i in range(len(LTPW)):
- LTPW[i] = normalization(LTPW[i])
+ for i in range(28):

- return LTPW
+ art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
+ art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
+ art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
+ ax[0, 1].set_title('Neural Web')
+
+ art4_list = [art4_1] + [art4_2] + list(art4_3.values())

- # FUNCTIONS -----
+ artist4.append(art4_list)

- def shape_control(x_train):

- try:
- row = x_train[1].shape[0]
- col = x_train[1].shape[1]
-
- except:
+ artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1, draw_is_finished=True)

- print(Fore.MAGENTA + 'WARNING: You are trying show_training but the inputs are raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
-
- try:
- row, col = find_numbers(len(x_train[0]))
-
- except:
-
- print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be drawn; weight learning progress drawing started.' + Style.RESET_ALL)
- return [0, 0]
-
- return row, col
+ ani1 = ArtistAnimation(fig, artist1, interval=interval, blit=True)
+ ani2 = ArtistAnimation(fig, artist2, interval=interval, blit=True)
+ ani3 = ArtistAnimation(fig, artist3, interval=interval, blit=True)
+ ani4 = ArtistAnimation(fig, artist4, interval=interval, blit=True)

- def find_numbers(n):
- if n <= 1:
- raise ValueError("Parameter 'n' must be greater than 1.")
+ plt.show()

- for i in range(2, int(n**0.5) + 1):
- if n % i == 0:
- factor1 = i
- factor2 = n // i
- if factor1 == factor2:
- return factor1, factor2
+ LTPW = normalization(LTPW)

- return None
+ return LTPW
+
+ # FUNCTIONS -----
+
+ def generate_fixed_positions(G, layout_type='circular'):
+ pos = {}
+ num_nodes = len(G.nodes())
+
+ if layout_type == 'circular':
+ angles = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False)
+ radius = 10
+ for i, node in enumerate(G.nodes()):
+ pos[node] = (radius * np.cos(angles[i]), radius * np.sin(angles[i]))
+ elif layout_type == 'grid':
+ grid_size = int(np.ceil(np.sqrt(num_nodes)))
+ for i, node in enumerate(G.nodes()):
+ pos[node] = (i % grid_size, i // grid_size)
+ else:
+ raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")
+
+ return pos

  def weight_normalization(
  W,
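A quick check of the generate_fixed_positions() helper added above (the graph is illustrative):

import networkx as nx

G = nx.Graph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a'), ('c', 'd')])

pos = generate_fixed_positions(G, layout_type='circular')
# Four nodes evenly spaced on a circle of radius 10 (up to float rounding):
# {'a': (10.0, 0.0), 'b': (0.0, 10.0), 'c': (-10.0, 0.0), 'd': (0.0, -10.0)}

pos = generate_fixed_positions(G, layout_type='grid')
# 2x2 integer grid: {'a': (0, 0), 'b': (1, 0), 'c': (0, 1), 'd': (1, 1)}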
@@ -384,6 +396,20 @@ def weight_identification(

  # ACTIVATION FUNCTIONS -----

+ def spiral_activation(x):
+
+ r = np.sqrt(np.sum(x**2))
+
+ theta = np.arctan2(x[1:], x[:-1])
+
+ spiral_x = r * np.cos(theta + r)
+ spiral_y = r * np.sin(theta + r)
+
+
+ spiral_output = np.concatenate(([spiral_x[0]], spiral_y))
+
+ return spiral_output
+
  def Softmax(
  x # num: Input data to be transformed using softmax function.
  ):
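Shape-wise, the new spiral activation computes a scalar magnitude r for the whole input, n-1 pairwise angles theta, and then restores length n by concatenating spiral_x[0] with spiral_y. A small illustrative check:

import numpy as np

x = np.array([0.5, -0.2, 0.8])
out = spiral_activation(x)
assert out.shape == x.shape   # concatenate(([spiral_x[0]], spiral_y)) keeps the input length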
@@ -567,7 +593,8 @@ def fex(
  Class, # int: Which class it is, if training.
  activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
  index,
- max_w
+ max_w,
+ LTD=0
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic potentiation.
@@ -718,12 +745,21 @@ def fex(
  if activation == 'sine_offset':
  Output += sine_offset(Input, 1.0)

+ if activation == 'spiral':
+ Output += spiral_activation(Input)
+

  Input = Output


  if is_training == True:

+ for i in range(LTD):
+
+ depression_vector = np.random.rand(*Input.shape)
+
+ Input -= depression_vector
+
  w[Class, :] = Input

  return w
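Numerically, each LTD pass subtracts a fresh uniform [0, 1) vector from the potentiated input before it is stored in w[Class, :]. A standalone sketch of just that loop:

import numpy as np

Input = np.ones(5)
for _ in range(2):                      # LTD = 2
    Input -= np.random.rand(*Input.shape)
# Expected value per element after 2 passes: 1 - 2 * 0.5 = 0, so higher LTD
# depresses (and can flip the sign of) the stored weights.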
@@ -793,14 +829,15 @@ def evaluate(
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
  W (list[num]): Weight matrix list of the neural network.
- activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+ activation_potentiation (list): Activation function parameters for deeper PLAN networks. For details, run: help(plan.activation_functions_list). default: [None]
  bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices (bool): (True or None) (optional) Default: None

  Returns:
  tuple: A tuple containing the predicted labels and the accuracy of the model.
  """
-
+ evaluate.__doc__ = infoTestModel
+
  predict_probabilitys = []
  real_classes = []
  predict_classes = []
@@ -1011,7 +1048,6 @@ def multiple_evaluate(

  def save_model(model_name,
  model_type,
- class_count,
  test_acc,
  weights_type,
  weights_format,
@@ -1027,21 +1063,21 @@ def save_model(model_name,
  Arguments:
  model_name (str): Name of the model.
  model_type (str): Type of the model. (options: PLAN)
- class_count (int): Number of classes.
  test_acc (float): Test accuracy of the model.
  weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
  WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
  model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
- scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
+ scaler_params (list[num, num]): Standard scaler params list: [mean, std]. If the standard scaler was not used, pass None.
  W: Weights of the model.
- activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+ activation_potentiation (list): Activation function parameters for deeper PLAN networks. For details, run: help(plan.activation_functions_list). default: [None]

  Returns:
  str: Message indicating if the model was saved successfully or encountered an error.
  """

- # Operations to be performed by the function will be written here
- pass
+ save_model.__doc__ = infosave_model
+
+ class_count = W[0].shape[0]

  layers = ['fex']

@@ -1061,7 +1097,7 @@ def save_model(model_name,

  try:
  for w in W:
- NeuronCount += np.shape(w)[0]
+ NeuronCount += np.shape(w)[0] + np.shape(w)[1]
  SynapseCount += np.shape(w)[0] * np.shape(w)[1]
  except:

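Effect of the counting change above for a single (classes x features) weight matrix, e.g. 3 classes and 5 features:

import numpy as np

w = np.ones((3, 5))
NeuronCount = np.shape(w)[0] + np.shape(w)[1]    # 8: output *and* input neurons now counted
SynapseCount = np.shape(w)[0] * np.shape(w)[1]   # 15: one synapse per weight entry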
@@ -1188,7 +1224,8 @@ def load_model(model_name,
  Returns:
  lists: W(list[num]), activation_potentiation, DataFrame of the model
  """
- pass
+
+ load_model.__doc__ = infoload_model

  try:

@@ -1232,6 +1269,9 @@ def predict_model_ssd(Input, model_name, model_path):
  Returns:
  ndarray: Output from the model.
  """
+
+ predict_model_ssd.__doc__ = infopredict_model_ssd
+
  W, df = load_model(model_name, model_path)

  activation_potentiation = list(df['ACTIVATION POTENTIATION'])
@@ -1288,6 +1328,9 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
  Returns:
  ndarray: Output from the model.
  """
+
+ predict_model_ram.__doc__ = infopredict_model_ram
+
  try:
  if scaler_params != None:

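A hedged sketch of in-memory inference, reusing the `plan` import and the `W` returned by the fit() sketch earlier; the sample vector is illustrative and scaler_params stays at None (no standardization):

import numpy as np

sample = np.random.rand(4)         # must match the feature count the model was trained on
out = plan.predict_model_ram(sample, W=W, activation_potentiation=[None])
predicted_class = np.argmax(out)   # index of the winning output neuron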
@@ -1333,6 +1376,9 @@ def auto_balancer(x_train, y_train):
  Returns:
  tuple: A tuple containing balanced input data and labels.
  """
+
+ auto_balancer.__doc__ = infoauto_balancer
+
  classes = np.arange(y_train.shape[1])
  class_count = len(classes)

@@ -1342,7 +1388,7 @@ def auto_balancer(x_train, y_train):
  classes = [len(ClassIndices[i]) for i in range(class_count)]

  if len(set(classes)) == 1:
- print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+ print(Fore.WHITE + "INFO: Data is already balanced. from: auto_balancer" + Style.RESET_ALL)
  return x_train, y_train

  MinCount = min(classes)
@@ -1359,7 +1405,7 @@ def auto_balancer(x_train, y_train):
  BalancedInputs = [x_train[idx] for idx in BalancedIndices]
  BalancedLabels = [y_train[idx] for idx in BalancedIndices]

- print(Fore.GREEN + "All Data Succesfully Balanced from: " + str(len(x_train)
+ print(Fore.GREEN + "Data Successfully Balanced from: " + str(len(x_train)
  ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
  except:
  print(Fore.RED + "ERROR: Inputs and labels must be the same length; check parameters." + infoauto_balancer)
@@ -1418,7 +1464,7 @@ def synthetic_augmentation(x_train, y_train):
  return np.array(x_balanced), np.array(y_balanced)


- def standard_scaler(x_train, x_test=None, scaler_params=None):
+ def standard_scaler(x_train=None, x_test=None, scaler_params=None):
  info_standard_scaler = """
  Standardizes training and test datasets. x_test may be None.

@@ -1434,6 +1480,9 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
  tuple
  Standardized training and test datasets
  """
+
+ standard_scaler.__doc__ = info_standard_scaler
+
  try:

  x_train = x_train.tolist()
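For reference, the standardization here is the usual z-score transform; a manual equivalent (not the package call), which also shows what the [mean, std] scaler_params pair documented in save_model() contains:

import numpy as np

x_train = np.random.rand(40, 4)
x_test = np.random.rand(10, 4)

mean, std = x_train.mean(axis=0), x_train.std(axis=0)
x_train_scaled = (x_train - mean) / std
x_test_scaled = (x_test - mean) / std   # test data reuses the *training* statistics
scaler_params = [mean, std]             # what save_model() expects when scaling was used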
@@ -1494,7 +1543,7 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
  return('e')

  def encode_one_hot(y_train, y_test):
- info_one_hot_encode = """
+ """
  Performs one-hot encoding on y_train and y_test data.

  Args:
@@ -1868,11 +1917,11 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)

  plt.show()

- def plot_decision_boundary(x, y, activation_potentiation, W):

- feature_indices=[0, 1]
+ def plot_decision_boundary(ax, x, y, activation_potentiation, W, artist, draw_is_finished=False):
+ feature_indices = [0, 1]

- h = .02 # mesh grid step
+ h = .02
  x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
  y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
  xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
@@ -1885,26 +1934,39 @@ def plot_decision_boundary(x, y, activation_potentiation, W):
  Z = [None] * len(grid_full)

  for i in range(len(grid_full)):
-
  Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))

  Z = np.array(Z)
  Z = Z.reshape(xx.shape)
+
+ if draw_is_finished == False:

- plt.contourf(xx, yy, Z, alpha=0.8)
- plt.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
- plt.xlabel(f'Feature {0 + 1}')
- plt.ylabel(f'Feature {1 + 1}')
- plt.title('Decision Boundary')
- plt.draw()
+ art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
+ art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+ ax[1, 0].set_xlabel(f'Feature {0 + 1}')
+ ax[1, 0].set_ylabel(f'Feature {1 + 1}')
+ ax[1, 0].set_title('Decision Boundary')
+ artist.append([*art1_1.collections, art1_2])
+
+ else:
+
+ for i in range(30):
+
+ art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
+ art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+ ax[1, 0].set_xlabel(f'Feature {0 + 1}')
+ ax[1, 0].set_ylabel(f'Feature {1 + 1}')
+ ax[1, 0].set_title('Decision Boundary')
+ artist.append([*art1_1.collections, art1_2])
+
+ return artist

  def pca(X, n_components):
  """
- Function that applies the PCA algorithm.

  Parameters:
- X (numpy array): Input data (n_samples, n_features)
- n_components (int): Number of principal components to keep
+ X (numpy array): (n_samples, n_features)
+ n_components (int): Number of principal components to keep

  Returns:
  X_reduced (numpy array): (n_samples, n_components)
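The rewritten plot_decision_boundary() feeds the artist-per-frame pattern ArtistAnimation expects: each appended list is one frame of Artists. A self-contained sketch of that pattern (note that art.collections matches the older Matplotlib API this code targets; ContourSet became a single Artist in Matplotlib 3.8+):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import ArtistAnimation

fig, ax = plt.subplots()
frames = []
for step in range(10):
    Z = np.random.rand(20, 20) * (step + 1)   # stand-in for the evolving class grid
    cs = ax.contourf(Z, alpha=0.8)
    frames.append(cs.collections)             # one Artist per contour level (pre-3.8 API)

ani = ArtistAnimation(fig, frames, interval=100, blit=True)
plt.show()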
@@ -1918,7 +1980,6 @@ def pca(X, n_components):

  sorted_index = np.argsort(eigenvalues)[::-1]
  sorted_eigenvectors = eigenvectors[:, sorted_index]
- sorted_eigenvalues = eigenvalues[sorted_index]

  eigenvectors_subset = sorted_eigenvectors[:, :n_components]

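Illustrative call to the pca() helper (the removed sorted_eigenvalues line was dead code; the projection only needs the sorted eigenvectors):

import numpy as np

X = np.random.rand(100, 4)          # 100 samples, 4 features
X_reduced = pca(X, n_components=2)
print(X_reduced.shape)              # (100, 2)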
@@ -1946,7 +2007,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
  norm = plt.Normalize(vmin=0, vmax=num_classes - 1)


- scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, edgecolor='k', s=50, cmap=cmap, norm=norm)
+ plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, edgecolor='k', s=50, cmap=cmap, norm=norm)


  for cls in range(num_classes):
{pyerualjetwork-3.2.0.dist-info → pyerualjetwork-3.3.1.dist-info}/METADATA CHANGED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 3.2.0
- Summary: Deep PLAN Integreted. Version 3 document coming
+ Version: 3.3.1
+ Summary: fit function changes: LTD parameter added for Deep PLAN, and professional training visualization via show_training=True in the fit function
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
  Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
pyerualjetwork-3.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+ plan/plan.py,sha256=39AW4YIsz6htzmlVGLLUab1wpSQw6u2PeE9I488tSx4,67925
+ pyerualjetwork-3.3.1.dist-info/METADATA,sha256=0QhZbV-1i7-9ynrRwLTEP6MaWzBzD9yc2uIZeMFkj5I,368
+ pyerualjetwork-3.3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-3.3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-3.3.1.dist-info/RECORD,,
pyerualjetwork-3.2.0.dist-info/RECORD DELETED
@@ -1,6 +0,0 @@
- plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
- plan/plan.py,sha256=ASQIHk-atzX48iH7RfyqSRo4rO2YyDKqLudo3l_9d78,65066
- pyerualjetwork-3.2.0.dist-info/METADATA,sha256=HhmOci9_X7tjMcPKFw4ZMKOj3-BM70RQNNg569CJBtU,274
- pyerualjetwork-3.2.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-3.2.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
- pyerualjetwork-3.2.0.dist-info/RECORD,,