pyerualjetwork 3.1__py3-none-any.whl → 3.3.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
plan/plan.py CHANGED
@@ -18,43 +18,49 @@ from scipy.spatial import ConvexHull
  from datetime import datetime
  from scipy import io
  import scipy.io as sio
+ from matplotlib.animation import ArtistAnimation
+ import networkx as nx
 
  # BUILD -----
 
 
  def fit(
  x_train: List[Union[int, float]],
- y_train: List[Union[int, float]], # At least two.. and one hot encoded
+ y_train: List[Union[int, float]], # One hot encoded
  val= None,
  val_count = None,
- activation_potentiation=[None], # (float): Input activation_potentiation (optional)
+ activation_potentiation=[None], # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
  x_val= None,
  y_val= None,
  show_training = None,
- show_count= None, # [DISABLED]
- visible_layer=None # For future [DISABLED]
+ visible_layer=None, # For the future [DISABLED]
+ interval=100,
+ LTD = 0 # LONG TERM DEPRESSION
  ) -> str:
 
  infoPLAN = """
  Creates and configures a PLAN model.
 
  Args:
- x_train (list[num]): List of input data.
- y_train (list[num]): List of target labels. (one hot encoded)
- val (None, True or 'final'): validation in training process ? None, True or 'final' Default: None (optional)
- val_count (None, int): After how many examples learned will an accuracy test be performed? Default: 0.1 (%10) (optional)
- activation_potentiation (float): Input activation potentiation (for binary injection) (optional) in range: -1, 1
- x_val (list[num]): List of validation data. (optional) Default: %10 of x_train (auto_balanced) it means every %1 of train progress starts validation
- y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: %10 of y_train (auto_balanced) it means every %1 of train progress starts validation
- show_training (bool, str): True, None or'final'
- show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware) Default: 10 (optional) [DISABLED]
- visible_layer: For future [DISABLED]
+ x_train (list[num]): List or numarray of input data.
+ y_train (list[num]): List or numarray of target labels. (one hot encoded)
+ val (None or True): validation in training process ? None or True default: None (optional)
+ val_count (None or int): After how many examples learned will an accuracy test be performed? default: 10=(%10) it means every approximately 10 step (optional)
+ activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: help(plan.activation_functions_list) default: [None] (optional)
+ x_val (list[num]): List of validation data. (optional) Default: %10 of x_train (auto_balanced) it means every %1 of train progress starts validation default: x_train (optional)
+ y_val (list[num]): (list[num]): List of target labels. (one hot encoded) (optional) Default: %10 of y_train (auto_balanced) it means every %1 of train progress starts validation default: y_train (optional)
+ show_training (bool, str): True or None default: None (optional)
+ visible_layer: For the future [DISABLED]
+ LTD (int): Long Term Depression Hyperparameter for train PLAN neural network (optional)
+ interval (float, int): frame delay (milisecond) parameter for Training Report (show_training=True) This parameter effects to your Training Report performance. Lower value is more diffucult for Low end PC's (33.33 = 30 FPS, 16.67 = 60 FPS) default: 100 (optional)
 
  Returns:
- list([num]): (Weight matrices list, train_predictions list, Train_acc).
+ list([num]): (Weight matrix).
  error handled ?: Process status ('e')
  """
 
+ fit.__doc__ = infoPLAN
+
  visible_layer = None
 
  if len(x_train) != len(y_train):
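The new signature drops show_count and the 'final' modes and adds interval and LTD. A minimal sketch of a 3.3.0-style call, assuming the wheel's plan package is importable as import plan and that plan/__init__.py re-exports fit (the __init__ body is not shown in this diff); data and shapes are illustrative:

    import numpy as np
    import plan  # assumption: top_level.txt lists 'plan'

    x_train = np.random.rand(20, 4)                   # 20 samples, 4 features
    y_train = np.eye(2)[np.random.randint(0, 2, 20)]  # one hot encoded, as required

    # New in 3.3.0: LTD (long term depression passes) and interval
    # (frame delay in ms for the animated training report).
    W = plan.fit(x_train, y_train,
                 val=True,            # must be True when show_training=True (see ERROR115 below)
                 show_training=True,
                 interval=100,
                 LTD=1,
                 activation_potentiation=['tanh', None])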
@@ -62,45 +68,46 @@ def fit(
  print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN + Style.RESET_ALL)
  return 'e'
 
- if val == True or val == 'final':
+ if val == True:
 
  try:
 
  if x_val == None and y_val == None:
 
- x_train, x_val, y_train, y_val = split(x_train, y_train, test_size=0.1, random_state=42)
+ x_val = x_train
+ y_val = y_train
+
+ except:
 
- x_train, y_train = auto_balancer(x_train, y_train)
- x_val, y_val = auto_balancer(x_val, y_val)
+ pass
 
- except:
- pass
+ if val_count == None:
 
- if val == True:
+ val_count = 10
+
+ val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
+ v_iter = 0
+ val_list = [] * val_count
 
- if val_count == None:
+ if show_training == True:
 
- val_count = 0.01
+ G = nx.Graph()
 
- v_iter = 0
+ fig, ax = plt.subplots(2, 2)
+ fig.suptitle('Train Report')
 
- if val == 'final':
+ artist1 = []
+ artist2 = []
+ artist3 = []
+ artist4 = []
 
- val_count = 0.99
-
- val_count = int(len(x_train) * val_count)
- val_count_copy = val_count
- val_bar = tqdm(total=1, desc="Validating Accuracy", ncols=120)
- val_list = [] * val_count
+ if val != True:
 
- if show_count == None:
-
- show_count = 10
+ print(Fore.RED + "ERROR115: For showing training, val parameter must be True. from: fit",
+ infoPLAN + Style.RESET_ALL)
+ return 'e'
 
- if show_training == True or show_training == 'final':
 
- row, col = shape_control(x_train)
-
  class_count = set()
 
  for sublist in y_train:
@@ -123,10 +130,12 @@ def fit(
  x_train_size = len(x_train[0])
 
  if visible_layer == None:
+
  STPW = [None]
  STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT
 
  else:
+
  if visible_layer == 1:
  fex_count = visible_layer
  else:
@@ -135,6 +144,7 @@ def fit(
  fex_neurons = [None] * fex_count
 
  for i in range(fex_count):
+
  fex_neurons[i] = [x_train_size]
 
  cat_neurons = [len(class_count), x_train_size]
@@ -167,7 +177,7 @@ def fit(
  for Lindex, Layer in enumerate(STPW):
 
 
- STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w)
+ STPW[Lindex] = fex(neural_layer, STPW[Lindex], True, y[index], activation_potentiation, index=Lindex, max_w=max_w, LTD=LTD)
 
 
  for i in range(len(STPW)):
@@ -175,27 +185,68 @@ def fit(
 
  for i, w in enumerate(STPW):
  LTPW[i] = LTPW[i] + w
-
 
  if val == True:
 
- try:
-
- if round(progress) % 10 == 1:
-
+ if int(progress) % val_count == 1:
 
  validation_model = evaluate(x_val, y_val, LTPW ,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
-
  val_acc = validation_model[get_acc()]
-
- plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)
 
- plt.pause(0.0001)
-
- plt.clf()
-
  val_list.append(val_acc)
 
+ if show_training == True:
+
+
+ mat = LTPW[0]
+ art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
+ suptitle_info = 'Weight Learning Progress'
+
+ ax[0, 0].set_title(suptitle_info)
+
+ artist2.append([art2])
+
+ artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1)
+
+ period = list(range(1, len(val_list) + 1))
+
+ art3 = ax[1, 1].plot(
+ period,
+ val_list,
+ linestyle='--',
+ color='g',
+ marker='o',
+ markersize=6,
+ linewidth=2,
+ label='Validation Accuracy'
+ )
+
+ ax[1, 1].set_title('Validation History')
+ ax[1, 1].set_xlabel('Time')
+ ax[1, 1].set_ylabel('Validation Accuracy')
+ ax[1, 1].set_ylim([0, 1])
+
+ artist3.append(art3)
+
+ for i in range(LTPW[0].shape[0]):
+ for j in range(LTPW[0].shape[1]):
+ if LTPW[0][i, j] != 0:
+ G.add_edge(f'Motor Neuron{i}', f'Sensory Neuron{j}', ltpw=LTPW[0][i, j])
+
+ edges = G.edges(data=True)
+ weights = [edata['ltpw'] for _, _, edata in edges]
+ pos = generate_fixed_positions(G, layout_type='circular')
+
+ art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
+ art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
+ art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
+ ax[0, 1].set_title('Neural Web')
+
+ art4_list = [art4_1] + [art4_2] + list(art4_3.values())
+
+ artist4.append(art4_list)
+
+
  if v_iter == 0:
 
  val_bar.update(val_acc)
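The block above switches the training report from per-step plt.pause redraws to collected artist lists that are animated once at the end of fit. A standalone sketch of that matplotlib pattern, on toy data rather than the PLAN internals:

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import ArtistAnimation

    fig, ax = plt.subplots()
    frames = []
    for step in range(10):
        # one list of artists per frame, mirroring artist2.append([art2]) above
        im = ax.imshow(np.random.rand(4, 4), cmap='viridis', animated=True)
        frames.append([im])

    ani = ArtistAnimation(fig, frames, interval=100, blit=True)
    plt.show()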
@@ -206,131 +257,92 @@ def fit(
  val_bar.update(val_acc)
 
  v_iter += 1
- except:
- pass
-
- if show_training == True:
- if index == 0:
- if row != 0:
-
- ax = plt.subplots(1, len(class_count), figsize=(18, 14))
-
- if round(progress) % 2 == 1:
-
- for j in range(len(class_count)):
-
-
- if row != 0:
-
- mat = LTPW[0][j,:].reshape(row, col)
- suptitle_info = 'Neurons Learning Progress: % '
- title_info = f'{j+1}. Neuron'
-
- mat = LTPW[0][j,:].reshape(row, col)
-
- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(title_info)
-
- else:
-
-
- mat = LTPW[0]
- plt.imshow(mat, interpolation='sinc', cmap='viridis')
- suptitle_info = 'Weight Learning Progress: % '
- title_info = 'Weight Matrix Of Fex Layer'
-
- progress_status = f"{progress:.1f}"
- plt.title(suptitle_info + progress_status)
- plt.draw()
- plt.pause(0.0001)
- plt.clf()
-
+
  if visible_layer == None:
  STPW = [None]
  STPW[0] = np.ones((len(class_count), x_train_size)) # STPW = SHORT TIME POTENTIATION WEIGHT
 
  else:
  STPW = weight_identification(
- len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)
+ len(layers), len(class_count), fex_neurons, cat_neurons, x_train_size)
 
  train_progress.update(1)
 
- if show_training == 'final':
-
- fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
+ if show_training == True:
 
- for j in range(len(class_count)):
+ mat = LTPW[0]
 
- mat = LTPW[0][j,:].reshape(row, col)
+ for i in range(30):
 
- ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
- ax[j].set_aspect('equal')
-
- ax[j].set_xticks([])
- ax[j].set_yticks([])
- ax[j].set_title(f'{j+1}. Neuron')
+ art2 = ax[0, 0].imshow(mat, interpolation='sinc', cmap='viridis')
+ suptitle_info = 'Weight Learning Progress:'
 
- progress_status = f"{progress:.1f}"
- fig.suptitle('Neurons Learning Progress: % ' + progress_status)
- plt.draw()
- plt.pause(0.0001)
+ ax[0, 0].set_title(suptitle_info)
 
- if val == 'final':
-
- validation_model = evaluate(x_val, y_val, LTPW,bar_status=False, activation_potentiation=activation_potentiation, show_metrices=None)
+ artist2.append([art2])
+
+ art3 = ax[1, 1].plot(
+ period,
+ val_list,
+ linestyle='--',
+ color='g',
+ marker='o',
+ markersize=6,
+ linewidth=2,
+ label='Validation Accuracy'
+ )
 
- val_acc = validation_model[get_acc()]
-
- plot_decision_boundary(x_val, y_val, activation_potentiation, LTPW)
+ ax[1, 1].set_title('Validation History')
+ ax[1, 1].set_xlabel('Time')
+ ax[1, 1].set_ylabel('Validation Accuracy')
+ ax[1, 1].set_ylim([0, 1])
 
- val_list.append(val_acc)
-
- val_bar.update(val_acc)
-
- for i in range(len(LTPW)):
- LTPW[i] = normalization(LTPW[i])
+ artist3.append(art3)
 
- return LTPW
+ for i in range(28):
 
- # FUNCTIONS -----
+ art4_1 = nx.draw_networkx_nodes(G, pos, ax=ax[0, 1], node_size=1000, node_color='lightblue')
+ art4_2 = nx.draw_networkx_edges(G, pos, ax=ax[0, 1], edge_color=weights, edge_cmap=plt.cm.Blues)
+ art4_3 = nx.draw_networkx_labels(G, pos, ax=ax[0, 1], font_size=10, font_weight='bold')
+ ax[0, 1].set_title('Neural Web')
+
+ art4_list = [art4_1] + [art4_2] + list(art4_3.values())
 
- def shape_control(x_train):
+ artist4.append(art4_list)
 
- try:
- row = x_train[1].shape[0]
- col = x_train[1].shape[1]
-
- except:
 
- print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
-
- try:
- row, col = find_numbers(len(x_train[0]))
-
- except:
-
- print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
- return [0, 0]
-
- return row, col
+ artist1 = plot_decision_boundary(ax, x_val, y_val, activation_potentiation, LTPW, artist=artist1, draw_is_finished=True)
+
+ ani1 = ArtistAnimation(fig, artist1, interval=interval, blit=True)
+ ani2 = ArtistAnimation(fig, artist2, interval=interval, blit=True)
+ ani3 = ArtistAnimation(fig, artist3, interval=interval, blit=True)
+ ani4 = ArtistAnimation(fig, artist4, interval=interval, blit=True)
 
- def find_numbers(n):
- if n <= 1:
- raise ValueError("Parameter 'n' must be greater than 1.")
+ plt.show()
 
- for i in range(2, int(n**0.5) + 1):
- if n % i == 0:
- factor1 = i
- factor2 = n // i
- if factor1 == factor2:
- return factor1, factor2
+ LTPW = normalization(LTPW)
 
- return None
+ return LTPW
+
+ # FUNCTIONS -----
+
+ def generate_fixed_positions(G, layout_type='circular'):
+ pos = {}
+ num_nodes = len(G.nodes())
+
+ if layout_type == 'circular':
+ angles = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False)
+ radius = 10
+ for i, node in enumerate(G.nodes()):
+ pos[node] = (radius * np.cos(angles[i]), radius * np.sin(angles[i]))
+ elif layout_type == 'grid':
+ grid_size = int(np.ceil(np.sqrt(num_nodes)))
+ for i, node in enumerate(G.nodes()):
+ pos[node] = (i % grid_size, i // grid_size)
+ else:
+ raise ValueError("Unsupported layout_type. Use 'circular' or 'grid'.")
+
+ return pos
346
 
335
347
  def weight_normalization(
336
348
  W,
@@ -384,37 +396,6 @@ def weight_identification(
384
396
 
385
397
  # ACTIVATION FUNCTIONS -----
386
398
 
387
- def tanh(x):
388
- return np.tanh(x)
389
-
390
- def swish(x):
391
- return x * (1 / (1 + np.exp(-x)))
392
-
393
- def circular_activation(x):
394
- return (np.sin(x) + 1) / 2
395
-
396
- def modular_circular_activation(x, period=2*np.pi):
397
- return np.mod(x, period) / period
398
-
399
- def tanh_circular_activation(x):
400
- return (np.tanh(x) + 1) / 2
401
-
402
- def leaky_relu(x, alpha=0.01):
403
- return np.where(x > 0, x, alpha * x)
404
-
405
- def softplus(x):
406
- return np.log(1 + np.exp(x))
407
-
408
- def elu(x, alpha=1.0):
409
- return np.where(x > 0, x, alpha * (np.exp(x) - 1))
410
-
411
- def gelu(x):
412
- return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
413
-
414
- def selu(x, lambda_=1.0507, alpha=1.6733):
415
- return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
416
-
417
-
418
399
  def Softmax(
419
400
  x # num: Input data to be transformed using softmax function.
420
401
  ):
@@ -461,15 +442,145 @@ def Relu(
 
  return np.maximum(0, x)
 
+ def tanh(x):
+ return np.tanh(x)
+
+ def swish(x):
+ return x * (1 / (1 + np.exp(-x)))
+
+ def circular_activation(x):
+ return (np.sin(x) + 1) / 2
+
+ def modular_circular_activation(x, period=2*np.pi):
+ return np.mod(x, period) / period
+
+ def tanh_circular_activation(x):
+ return (np.tanh(x) + 1) / 2
+
+ def leaky_relu(x, alpha=0.01):
+ return np.where(x > 0, x, alpha * x)
+
+ def softplus(x):
+ return np.log(1 + np.exp(x))
+
+ def elu(x, alpha=1.0):
+ return np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ def gelu(x):
+ return 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
+
+ def selu(x, lambda_=1.0507, alpha=1.6733):
+ return lambda_ * np.where(x > 0, x, alpha * (np.exp(x) - 1))
+
+ # 1. Sinusoids Activation (SinAkt)
+ def sinakt(x):
+ return np.sin(x) + np.cos(x)
+
+ # 2. Parametric Squared Activation (P-Squared)
+ def p_squared(x, alpha=1.0, beta=0.0):
+ return alpha * x**2 + beta * x
+
+ def sglu(x, alpha=1.0):
+ return softmax(alpha * x) * x
+
+ # 4. Double Leaky ReLU (DLReLU)
+ def dlrelu(x):
+ return np.maximum(0.01 * x, x) + np.minimum(0.01 * x, 0.1 * x)
+
+ # 5. Exponential Sigmoid (ExSig)
+ def exsig(x):
+ return 1 / (1 + np.exp(-x**2))
+
+ # 6. Adaptive Cosine Activation (ACos)
+ def acos(x, alpha=1.0, beta=0.0):
+ return np.cos(alpha * x + beta)
+
+ # 7. Gaussian-like Activation (GLA)
+ def gla(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ # 8. Swish ReLU (SReLU)
+ def srelu(x):
+ return x * (1 / (1 + np.exp(-x))) + np.maximum(0, x)
+
+ # 9. Quadratic Exponential Linear Unit (QELU)
+ def qelu(x):
+ return x**2 * np.exp(x) - 1
+
+ # 10. Inverse Square Root Activation (ISRA)
+ def isra(x):
+ return x / np.sqrt(np.abs(x) + 1)
+
+ def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
+ return np.sin(alpha * x) * np.cos(beta * x) * np.sin(gamma * x)
+
+ def arctan(x):
+ return np.arctan(x)
+
+ def bent_identity(x):
+ return (np.sqrt(x**2 + 1) - 1) / 2 + x
+
+ def sech(x):
+ return 2 / (np.exp(x) + np.exp(-x))
+
+ def softsign(x):
+ return x / (1 + np.abs(x))
+
+ def pwl(x, alpha=0.5, beta=1.5):
+ return np.where(x <= 0, alpha * x, beta * x)
+
+ def cubic(x):
+ return x**3
+
+ def gaussian(x, alpha=1.0, mu=0.0):
+ return np.exp(-alpha * (x - mu)**2)
+
+ def sine(x, alpha=1.0):
+ return np.sin(alpha * x)
+
+ def tanh_square(x):
+ return np.tanh(x)**2
+
+ def mod_sigmoid(x, alpha=1.0, beta=0.0):
+ return 1 / (1 + np.exp(-alpha * x + beta))
+
+ def quartic(x):
+ return x**4
+
+ def square_quartic(x):
+ return (x**2)**2
+
+ def cubic_quadratic(x):
+ return x**3 * (x**2)
+
+ def exp_cubic(x):
+ return np.exp(x**3)
+
+ def sine_square(x):
+ return np.sin(x)**2
+
+ def logarithmic(x):
+ return np.log(x**2 + 1)
+
+ def power(x, p):
+ return x**p
+
+ def scaled_cubic(x, alpha=1.0):
+ return alpha * x**3
+
+ def sine_offset(x, beta=0.0):
+ return np.sin(x + beta)
+
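All of these are plain elementwise NumPy functions, so they can be probed directly. A quick sanity check of a few of the newly added ones, assuming they are imported from the plan.plan module:

    import numpy as np

    x = np.array([-2.0, 0.0, 2.0])

    print(sinakt(x))   # sin(x) + cos(x)
    print(dlrelu(x))   # max(0.01x, x) + min(0.01x, 0.1x)
    print(isra(x))     # x / sqrt(|x| + 1)
    print(exsig(x))    # 1 / (1 + exp(-x**2))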
 
  def fex(
  Input, # list[num]: Input data.
  w, # num: Weight matrix of the neural network.
  is_training, # bool: Flag indicating if the function is called during training (True or False).
  Class, # int: Which class is, if training.
- activation_potentiation,
+ activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
  index,
- max_w # float or None: Input activation potentiation (optional)
+ max_w,
+ LTD=0
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic potentiation.
@@ -479,10 +590,12 @@ def fex(
  w (num): Weight matrix of the neural network.
  is_training (bool): Flag indicating if the function is called during training (True or False).
  Class (int): if is during training then which class(label) ? is isnt then put None.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
+ # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
+ or
+ num: neural network output
  """
 
  Output = np.zeros(len(Input))
@@ -528,13 +641,108 @@ def fex(
  if activation == 'tanh':
  Output += tanh(Input)
 
- if activation == None:
+ if activation == 'sinakt':
+ Output += sinakt(Input)
+
+ if activation == 'p_squared':
+ Output += p_squared(Input)
+
+ if activation == 'sglu':
+ Output += sglu(Input, alpha=1.0)
+
+ if activation == 'dlrelu':
+ Output += dlrelu(Input)
+
+ if activation == 'exsig':
+ Output += exsig(Input)
+
+ if activation == 'acos':
+ Output += acos(Input, alpha=1.0, beta=0.0)
+
+ if activation == 'gla':
+ Output += gla(Input, alpha=1.0, mu=0.0)
+
+ if activation == 'srelu':
+ Output += srelu(Input)
+
+ if activation == 'qelu':
+ Output += qelu(Input)
+
+ if activation == 'isra':
+ Output += isra(Input)
+
+ if activation == 'waveakt':
+ Output += waveakt(Input)
+
+ if activation == 'arctan':
+ Output += arctan(Input)
+
+ if activation == 'bent_identity':
+ Output += bent_identity(Input)
+
+ if activation == 'sech':
+ Output += sech(Input)
+
+ if activation == 'softsign':
+ Output += softsign(Input)
+
+ if activation == 'pwl':
+ Output += pwl(Input)
+
+ if activation == 'cubic':
+ Output += cubic(Input)
+
+ if activation == 'gaussian':
+ Output += gaussian(Input)
+
+ if activation == 'sine':
+ Output += sine(Input)
+
+ if activation == 'tanh_square':
+ Output += tanh_square(Input)
+
+ if activation == 'mod_sigmoid':
+ Output += mod_sigmoid(Input)
+
+ if activation == None or activation == 'linear':
  Output += Input
 
+ if activation == 'quartic':
+ Output += quartic(Input)
+
+ if activation == 'square_quartic':
+ Output += square_quartic(Input)
+
+ if activation == 'cubic_quadratic':
+ Output += cubic_quadratic(Input)
+
+ if activation == 'exp_cubic':
+ Output += exp_cubic(Input)
+
+ if activation == 'sine_square':
+ Output += sine_square(Input)
+
+ if activation == 'logarithmic':
+ Output += logarithmic(Input)
+
+ if activation == 'scaled_cubic':
+ Output += scaled_cubic(Input, 1.0)
+
+ if activation == 'sine_offset':
+ Output += sine_offset(Input, 1.0)
+
+
  Input = Output
 
+
  if is_training == True:
 
+ for i in range(LTD):
+
+ depression_vector = np.random.rand(*Input.shape)
+
+ Input -= depression_vector
+
  w[Class, :] = Input
 
  return w
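The new LTD branch runs only during training: each pass subtracts a random vector with entries in [0, 1) from the post-activation trace before it is written into w[Class, :]. A standalone rerun of just that step:

    import numpy as np

    Input = np.ones(5)   # stand-in for fex's post-activation Output
    LTD = 2              # two long-term-depression passes

    for _ in range(LTD):
        depression_vector = np.random.rand(*Input.shape)  # values in [0, 1)
        Input -= depression_vector

    # Input now holds the depressed trace that fex assigns to w[Class, :]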
@@ -593,7 +801,7 @@ def evaluate(
  x_test, # list[num]: Test input data.
  y_test, # list[num]: Test labels.
  W, # list[num]: Weight matrix list of the neural network.
- activation_potentiation=[None], # activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ activation_potentiation=[None], # (list): Activation potentiation list for deep PLAN. (optional)
  bar_status=True, # bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices=None # show_metrices (bool): (True or None) (optional) Default: None
  ) -> tuple:
@@ -604,14 +812,15 @@ def evaluate(
  x_test (list[num]): Test input data.
  y_test (list[num]): Test labels.
  W (list[num]): Weight matrix list of the neural network.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: help(plan.activation_functions_list) default: [None]
  bar_status (bool): Loading bar for accuracy (True or None) (optional) Default: True
  show_metrices (bool): (True or None) (optional) Default: None
 
  Returns:
  tuple: A tuple containing the predicted labels and the accuracy of the model.
  """
-
+ evaluate.__doc__ = infoTestModel
+
  predict_probabilitys = []
  real_classes = []
  predict_classes = []
@@ -838,21 +1047,21 @@ def save_model(model_name,
  Arguments:
  model_name (str): Name of the model.
  model_type (str): Type of the model.(options: PLAN)
- class_count (int): Number of classes.
  test_acc (float): Test accuracy of the model.
  weights_type (str): Type of weights to save (options: 'txt', 'npy', 'mat').
  WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
  model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
- scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
+ scaler_params (list[num, num]): standard scaler params list: mean,std. If not used standard scaler then be: None.
  W: Weights of the model.
- activation_potentiation (float or None): Threshold value for comparison. (optional)
+ activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: help(plan.activation_functions_list) default: [None]
 
  Returns:
  str: Message indicating if the model was saved successfully or encountered an error.
  """
 
- # Operations to be performed by the function will be written here
- pass
+ save_model.__doc__ = infosave_model
+
+ class_count = W[0].shape[0]
 
  layers = ['fex']
 
@@ -872,7 +1081,7 @@ def save_model(model_name,
 
  try:
  for w in W:
- NeuronCount += np.shape(w)[0]
+ NeuronCount += np.shape(w)[0] + np.shape(w)[1]
  SynapseCount += np.shape(w)[0] * np.shape(w)[1]
  except:
 
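The NeuronCount change means both dimensions of each weight matrix are now counted (output neurons plus input neurons) rather than the rows alone. For the single 2x4 matrix that fit() builds for 2 classes and 4 features:

    import numpy as np

    W = [np.ones((2, 4))]  # one fex layer weight matrix

    NeuronCount = sum(np.shape(w)[0] + np.shape(w)[1] for w in W)   # 3.3.0: 6
    SynapseCount = sum(np.shape(w)[0] * np.shape(w)[1] for w in W)  # 8
    # under 3.1 the same W reported NeuronCount == 2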
@@ -898,7 +1107,7 @@ def save_model(model_name,
  'CLASS COUNT': class_count,
  'NEURON COUNT': NeuronCount,
  'SYNAPSE COUNT': SynapseCount,
- 'TEST ACCURACY': test_acc,
+ 'TEST ACCURACY': float(test_acc),
  'SAVE DATE': datetime.now(),
  'WEIGHTS TYPE': weights_type,
  'WEIGHTS FORMAT': weights_format,
@@ -999,7 +1208,8 @@ def load_model(model_name,
  Returns:
  lists: W(list[num]), activation_potentiation, DataFrame of the model
  """
- pass
+
+ load_model.__doc__ = infoload_model
 
  try:
 
@@ -1043,6 +1253,9 @@ def predict_model_ssd(Input, model_name, model_path):
  Returns:
  ndarray: Output from the model.
  """
+
+ predict_model_ram.__doc__ = infopredict_model_ssd
+
  W, df = load_model(model_name, model_path)
 
  activation_potentiation = list(df['ACTIVATION POTENTIATION'])
@@ -1093,12 +1306,15 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=[Non
  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
  W (list of ndarrays): Weights of the model.
- scaler_params (int, float): standard scaler params list: mean,std. (optional) Default: None.
- activation_potentiation (float or None): Threshold value for comparison. (optional) Default: None
+ scaler_params (list): standard scaler params list: mean,std. (optional) Default: None.
+ activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
  Returns:
  ndarray: Output from the model.
  """
+
+ predict_model_ram.__doc__ = infopredict_model_ram
+
  try:
  if scaler_params != None:
 
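predict_model_ram keeps its call shape; only the documented parameter types change. A minimal inference sketch, assuming a weight list in the format fit() returns and that the function is reachable via import plan (the __init__ re-exports are not shown here):

    import numpy as np
    import plan  # assumption: re-exported by plan/__init__.py

    sample = np.random.rand(4)  # single input vector
    W = [np.ones((2, 4))]       # [class_count x feature_count] weight list

    out = plan.predict_model_ram(sample, W, activation_potentiation=[None])
    print(np.argmax(out))       # predicted class index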
@@ -1144,6 +1360,9 @@ def auto_balancer(x_train, y_train):
  Returns:
  tuple: A tuple containing balanced input data and labels.
  """
+
+ auto_balancer.__doc__ = infoauto_balancer
+
  classes = np.arange(y_train.shape[1])
  class_count = len(classes)
 
@@ -1153,7 +1372,7 @@ def auto_balancer(x_train, y_train):
  classes = [len(ClassIndices[i]) for i in range(class_count)]
 
  if len(set(classes)) == 1:
- print(Fore.WHITE + "INFO: All training data have already balanced. from: auto_balancer" + Style.RESET_ALL)
+ print(Fore.WHITE + "INFO: Data have already balanced. from: auto_balancer" + Style.RESET_ALL)
  return x_train, y_train
 
  MinCount = min(classes)
@@ -1170,7 +1389,7 @@ def auto_balancer(x_train, y_train):
  BalancedInputs = [x_train[idx] for idx in BalancedIndices]
  BalancedLabels = [y_train[idx] for idx in BalancedIndices]
 
- print(Fore.GREEN + "All Data Succesfully Balanced from: " + str(len(x_train)
+ print(Fore.GREEN + "Data Succesfully Balanced from: " + str(len(x_train)
  ) + " to: " + str(len(BalancedInputs)) + ". from: auto_balancer " + Style.RESET_ALL)
  except:
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoauto_balancer)
@@ -1245,6 +1464,9 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
  tuple
  Standardized training and test datasets
  """
+
+ standard_scaler.__doc__ = info_standard_scaler
+
  try:
 
  x_train = x_train.tolist()
@@ -1305,7 +1527,7 @@ def standard_scaler(x_train, x_test=None, scaler_params=None):
  return('e')
 
  def encode_one_hot(y_train, y_test):
- info_one_hot_encode = """
+ """
  Performs one-hot encoding on y_train and y_test data..
 
  Args:
@@ -1679,11 +1901,11 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
 
  plt.show()
 
- def plot_decision_boundary(x, y, activation_potentiation, W):
 
- feature_indices=[0, 1]
+ def plot_decision_boundary(ax, x, y, activation_potentiation, W, artist, draw_is_finished=False):
+ feature_indices = [0, 1]
 
- h = .02 # mesh grid step
+ h = .02
  x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
  y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
  xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
@@ -1696,26 +1918,39 @@ def plot_decision_boundary(x, y, activation_potentiation, W):
  Z = [None] * len(grid_full)
 
  for i in range(len(grid_full)):
-
  Z[i] = np.argmax(predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
 
  Z = np.array(Z)
  Z = Z.reshape(xx.shape)
+
+ if draw_is_finished == False:
 
- plt.contourf(xx, yy, Z, alpha=0.8)
- plt.scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
- plt.xlabel(f'Feature {0 + 1}')
- plt.ylabel(f'Feature {1 + 1}')
- plt.title('Decision Boundary')
- plt.draw()
+ art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
+ art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+ ax[1, 0].set_xlabel(f'Feature {0 + 1}')
+ ax[1, 0].set_ylabel(f'Feature {1 + 1}')
+ ax[1, 0].set_title('Decision Boundary')
+ artist.append([*art1_1.collections, art1_2])
+
+ else:
+
+ for i in range(30):
+
+ art1_1 = ax[1, 0].contourf(xx, yy, Z, alpha=0.8)
+ art1_2 = ax[1, 0].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=decode_one_hot(y), edgecolors='k', marker='o', s=20, alpha=0.9)
+ ax[1, 0].set_xlabel(f'Feature {0 + 1}')
+ ax[1, 0].set_ylabel(f'Feature {1 + 1}')
+ ax[1, 0].set_title('Decision Boundary')
+ artist.append([*art1_1.collections, art1_2])
+
+ return artist
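The boundary plot keeps the usual mesh-grid recipe: tile the plane of two chosen features, query the model once per grid point, and reshape the predictions so contourf can draw regions. The recipe in isolation, with a dummy stand-in for the model call:

    import numpy as np

    x = np.random.rand(50, 2)
    h = .02  # mesh grid step
    x_min, x_max = x[:, 0].min() - 1, x[:, 0].max() + 1
    y_min, y_max = x[:, 1].min() - 1, x[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

    grid = np.c_[xx.ravel(), yy.ravel()]
    # stand-in for np.argmax(predict_model_ram(...)) per grid point:
    Z = np.array([int(g.sum() > 1.0) for g in grid]).reshape(xx.shape)
    # ax.contourf(xx, yy, Z) then overlays the scatter of the real points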
 
  def pca(X, n_components):
  """
- Function that applies the PCA algorithm.
 
  Parameters:
- X (numpy array): Input data (n_samples, n_features)
- n_components (int): Number of principal components to keep
+ X (numpy array): (n_samples, n_features)
+ n_components (int):
 
  Returns:
  X_reduced (numpy array): (n_samples, n_components)
@@ -1729,7 +1964,6 @@ def pca(X, n_components):
 
  sorted_index = np.argsort(eigenvalues)[::-1]
  sorted_eigenvectors = eigenvectors[:, sorted_index]
- sorted_eigenvalues = eigenvalues[sorted_index]
 
  eigenvectors_subset = sorted_eigenvectors[:, :n_components]
 
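pca follows the textbook eigen-decomposition route: center the data, take the covariance, sort eigenvectors by descending eigenvalue, and project; the diff only deletes the unused sorted_eigenvalues line. The same steps in isolation:

    import numpy as np

    X = np.random.rand(100, 5)
    X_meaned = X - np.mean(X, axis=0)
    cov = np.cov(X_meaned, rowvar=False)
    eigenvalues, eigenvectors = np.linalg.eigh(cov)  # symmetric matrix: eigh

    sorted_index = np.argsort(eigenvalues)[::-1]
    components = eigenvectors[:, sorted_index][:, :2]  # keep n_components = 2
    X_reduced = X_meaned @ components                  # (100, 2)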
@@ -1757,7 +1991,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
  norm = plt.Normalize(vmin=0, vmax=num_classes - 1)
 
 
- scatter = plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, edgecolor='k', s=50, cmap=cmap, norm=norm)
+ plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y, edgecolor='k', s=50, cmap=cmap, norm=norm)
 
 
  for cls in range(num_classes):
@@ -1780,7 +2014,7 @@ def plot_decision_space(x, y, y_preds=None, s=100, color='tab20'):
 
  plt.fill(hull_points[:, 0], hull_points[:, 1], color=cmap(norm(cls)), alpha=0.3, edgecolor='k', label=f'Class {cls} Hull')
 
- plt.title("Decision Boundry")
+ plt.title("Decision Space (Data Distribution)")
 
  plt.draw()
 
pyerualjetwork-3.3.0.dist-info/METADATA CHANGED
@@ -1,7 +1,7 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 3.1
- Summary: Deep PLAN Integreted. Version 3 document coming
+ Version: 3.3.0
+ Summary: fit function changes: LTD parameter included for Deep PLAN and professional visualizing for training. in the fit funciton, show_training=True
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
  Keywords: model evaluation,classifcation,potentiation learning artficial neural networks
pyerualjetwork-3.3.0.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
+ plan/plan.py,sha256=2ccjNjnPWMlC1uatIdXpexBpTgdYlBQdd6Hua9cDrb0,67575
+ pyerualjetwork-3.3.0.dist-info/METADATA,sha256=oaNPz8e5fpUVSBNugzKQskDpDYW41Fotoy_WVl-H5ao,368
+ pyerualjetwork-3.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-3.3.0.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-3.3.0.dist-info/RECORD,,
pyerualjetwork-3.1.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
- plan/__init__.py,sha256=LuFcY0nqAzpjTDWAZn7L7-wipwMpnREqVghPiva0Xjg,548
- plan/plan.py,sha256=Q7W2o5T4fLpme5ZG0kRPL0EA5SXA1Ccui3UfHWxkBHI,60472
- pyerualjetwork-3.1.dist-info/METADATA,sha256=bqwcxFGQBi8b-0l8bAzDfIdrtUbZV-pIP-1yiwhETOo,272
- pyerualjetwork-3.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-3.1.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
- pyerualjetwork-3.1.dist-info/RECORD,,