pyerualjetwork 4.1.7__py3-none-any.whl → 4.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -48,7 +48,7 @@ for package_name in package_names:
 
     print(f"PyerualJetwork is ready to use with {err} errors")
 
-__version__ = "4.1.7"
+__version__ = "4.1.8"
 
 __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
 def print_version(__version__):
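
After upgrading, the bump can be confirmed at runtime; a minimal check, assuming the package imports cleanly:

```python
import pyerualjetwork

# __version__ is assigned in the hunk above; the package's own
# print_version() helper would also work.
print(pyerualjetwork.__version__)  # expected: "4.1.8"
```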
@@ -58,15 +58,4 @@ def print_update_notes(__update__):
     print(f"Update Notes:\n{__update__}")
 
 print_version(__version__)
-print_update_notes(__update__)
-
-from .plan import *
-from .planeat import *
-from .activation_functions import *
-from .data_operations import *
-from .loss_functions import *
-from .metrics import *
-from .model_operations import *
-from .ui import *
-from .visualizations import *
-from .help import *
+print_update_notes(__update__)
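
Since 4.1.8 drops the wildcard re-exports from `__init__.py`, code that relied on package-level names presumably needs explicit submodule imports now; a minimal sketch, assuming the 4.1.8 layout (`x_train`/`y_train` are placeholders for your own one-hot-encoded dataset):

```python
# With `from .plan import *` and friends removed from __init__.py,
# import the submodules explicitly instead of relying on top-level names.
from pyerualjetwork import plan, planeat

W = plan.fit(x_train, y_train)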
pyerualjetwork/memory_operations.py CHANGED
@@ -40,7 +40,7 @@ def transfer_to_cpu(x, dtype=np.float32):
 
     :return: NumPy array with the specified dtype
     """
-    from .ui import loading_bars, initialize_loading_bar
+    from ui import loading_bars, initialize_loading_bar
     try:
         if isinstance(x, np.ndarray):
             return x.astype(dtype) if x.dtype != dtype else x
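
For context, `transfer_to_cpu` coerces its input to a NumPy array of the requested dtype (per the docstring above). A hedged usage sketch, with the module path inferred from this diff and the CuPy handling assumed from how `plan_cuda.learner` uses it:

```python
import numpy as np
import cupy as cp
from pyerualjetwork.memory_operations import transfer_to_cpu  # module path inferred

x_gpu = cp.random.rand(4, 3)                      # placeholder GPU array
x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)  # returns a NumPy array of that dtype
assert isinstance(x_cpu, np.ndarray) and x_cpu.dtype == np.float32
```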
pyerualjetwork/plan.py CHANGED
@@ -3,8 +3,8 @@
 
 MAIN MODULE FOR PLAN
 
-PLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PLAN/PLAN.pdf
-ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
 @author: Hasan Can Beydili
 @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -24,6 +24,7 @@ from .loss_functions import binary_crossentropy, categorical_crossentropy
 from .activation_functions import apply_activation, Softmax, all_activations
 from .metrics import metrics
 from .model_operations import get_acc, get_preds, get_preds_softmax
+from .memory_operations import optimize_labels
 from .visualizations import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -102,8 +103,6 @@ def fit(
     Returns:
         numpyarray([num]): (Weight matrix).
     """
-
-    from model_operations import get_acc
 
     # Pre-checks
 
@@ -173,45 +172,61 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
+def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
-            neurons_history=False, patience=None, depth=None, early_shifting=False,
-            early_stop=False, loss='categorical_crossentropy', show_history=False,
+            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None, start_this=None, dtype=np.float32):
+            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
-    the most accurate combination of activation potentiation for the given dataset.
+    the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
 
+    Why genetic optimization and not backpropagation?
+    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+
     Args:
 
         x_train (array-like): Training input data.
 
         y_train (array-like): Labels for training data. one-hot encoded.
 
+        optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+        ```python
+        genetic_optimizer = lambda *args, **kwargs: planeat.evolve(*args,
+                                                                   activation_add_prob=0.85,
+                                                                   mutations=False,
+                                                                   strategy='cross_over',
+                                                                   **kwargs)
+
+        model = plan.learner(x_train,
+                             y_train,
+                             optimizer=genetic_optimizer,
+                             strategy='accuracy',
+                             show_history=True,
+                             target_acc=0.94,
+                             interval=16.67)
+        ```
+
         x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
 
         y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
 
-        strategy (str, optional): Learning strategy. (options: 'accuracy', 'loss', 'f1', 'precision', 'recall', 'adaptive_accuracy', 'adaptive_loss', 'all'): 'accuracy', Maximizes test accuracy during learning. 'f1', Maximizes test f1 score during learning. 'precision', Maximizes test preciison score during learning. 'recall', Maximizes test recall during learning. loss', Minimizes test loss during learning. 'adaptive_accuracy', The model compares the current accuracy with the accuracy from the past based on the number specified by the patience value. If no improvement is observed it adapts to the condition by switching to the 'loss' strategy quickly starts minimizing loss and continues learning. 'adaptive_loss',The model adopts the 'loss' strategy until the loss reaches or falls below the value specified by the patience parameter. However, when the patience threshold is reached, it automatically switches to the 'accuracy' strategy and begins to maximize accuracy. 'all', Maximizes all test scores and minimizes test loss, 'all' strategy most strong and most robust strategy. Default is 'accuracy'.
-
-        patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
+        strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
-        depth (int, optional): The depth of the PLAN neural networks Aggreagation layers.
+        gen (int, optional): The generation count for genetic optimization.
 
         batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
 
         auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
-        early_shifting (int, optional): Early shifting checks if the test accuracy improves after a given number of activation attempts while inside a depth. If there's no improvement, it automatically shifts to the next depth. Basically, if no progress, it’s like, “Alright, let’s move on!” Default is False
-
-        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two depth stops learning.) Default is False.
+        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
 
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
-        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training(if strategy not 'loss'). options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
         interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
 
@@ -223,81 +238,54 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
         only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
-        start_this (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+        start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+
+        start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
 
         neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
         neural_web_history (bool, optional): Draws history of neural web. Default is False.
 
+        target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
         dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)
 
     Returns:
         tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
 
     """
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
     # Pre-checks
 
     x_train = x_train.astype(dtype, copy=False)
-
-    if len(y_train[0]) < 256:
-        if y_train.dtype != np.uint8:
-            y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
-    elif len(y_train[0]) <= 32767:
-        if y_train.dtype != np.uint16:
-            y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
-    else:
-        if y_train.dtype != np.uint32:
-            y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
-
-    if x_test is not None:
-        x_test = x_test.astype(dtype, copy=False)
-
-        if len(y_test[0]) < 256:
-            if y_test.dtype != np.uint8:
-                y_test = np.array(y_test, copy=False).astype(np.uint8, copy=False)
-        elif len(y_test[0]) <= 32767:
-            if y_test.dtype != np.uint16:
-                y_test = np.array(y_test, copy=False).astype(np.uint16, copy=False)
-        else:
-            if y_test.dtype != np.uint32:
-                y_test = np.array(y_test, copy=False).astype(np.uint32, copy=False)
+    y_train = optimize_labels(y_train, cuda=False)
 
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
+        x_test = x_test.astype(dtype, copy=False)
+        y_test = optimize_labels(y_test, cuda=False)
         data = 'Test'
 
-    if early_shifting != False:
-        shift_patience = early_shifting
-
-    # Strategy initialization
-    if strategy == 'adaptive_accuracy':
-        strategy = 'accuracy'
-        adaptive = True
-        if patience == None:
-            patience = 5
-    elif strategy == 'adaptive_loss':
-        strategy = 'loss'
-        adaptive = True
-        if patience == None:
-            patience = 0.150
-    else:
-        adaptive = False
-
     # Filter activation functions
-    if only_this != None:
+    if only_this is not None:
         activation_potentiation = only_this
-    if except_this != None:
+    if except_this is not None:
         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if depth is None:
-        depth = len(activation_potentiation)
+    if gen is None:
+        gen = len(activation_potentiation)
+
+    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
 
+    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
@@ -306,19 +294,20 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         ncols = 76
     else:
         ncols = 89
-    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
     # Initialize variables
-    activations = []
-    if start_this is None:
-        best_activations = []
+    act_pop = []
+    weight_pop = []
+
+    if start_this_act is None and start_this_W is None:
         best_acc = 0
     else:
-        best_activations = start_this
+        act_pop.append(start_this_act)
+        weight_pop.append(start_this_W)
+
     x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
 
-    W = fit(x_train, y_train, activation_potentiation=best_activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-    model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
+    model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
 
     if loss == 'categorical_crossentropy':
         test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
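
The old single `start_this` list is now split into `start_this_act` and `start_this_W`, so resuming an interrupted run supplies both the selected activations and the corresponding weight matrix. A hedged sketch of the new call (`saved_activations` and `saved_W` are placeholders for values kept from a previous run, e.g. the returned weights and the activations shown via `show_current_activations`; `genetic_optimizer` is as in the docstring example above):

```python
# Resuming an interrupted learner() run under the 4.1.8 API (sketch).
model = plan.learner(x_train, y_train,
                     optimizer=genetic_optimizer,
                     start_this_act=saved_activations,  # activations from the earlier run
                     start_this_W=saved_W,              # weight matrix of that genome
                     gen=20)
```

Note the new guard above: the filtered activation list must have even length, so an odd list needs an `except_this` entry (e.g. `except_this=['linear']`).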
@@ -328,12 +317,15 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     best_acc = model[get_acc()]
     best_loss = test_loss
 
-    best_acc_per_depth_list = []
+    best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
-
-    for i in range(depth):
-        postfix_dict["Depth"] = str(i+1) + '/' + str(depth)
+    target_pop = []
+
+    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    for i in range(gen):
+        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)
 
         progress.n = 0
@@ -341,80 +333,65 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         progress.update(0)
 
         for j in range(len(activation_potentiation)):
-            for k in range(len(best_activations)):
-                activations.append(best_activations[k])
-
-            activations.append(activation_potentiation[j])
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
+
+            if i == 0:
+                act_pop.append(activation_potentiation[j])
+                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop.append(W)
 
-            W = fit(x_train, y_train, activation_potentiation=activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype)
-
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
             acc = model[get_acc()]
+
+            if strategy == 'accuracy': target_pop.append(acc)
 
-            if strategy == 'loss' or strategy == 'all':
-                if loss == 'categorical_crossentropy':
-                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-                else:
-                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-            if i == 0 and j == 0 and start_this is None:
-                best_loss = test_loss
-
-            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
+            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
                 precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
 
-                if strategy == 'precision' or strategy == 'all':
+                if strategy == 'precision':
+                    target_pop.append(precision_score)
+
                     if i == 0 and j == 0:
                         best_precision = precision_score
 
-                if strategy == 'recall' or strategy == 'all':
+                if strategy == 'recall':
+                    target_pop.append(recall_score)
+
                     if i == 0 and j == 0:
                         best_recall = recall_score
 
-                if strategy == 'f1' or strategy == 'all':
+                if strategy == 'f1':
+                    target_pop.append(f1_score)
+
                     if i == 0 and j == 0:
                         best_f1 = f1_score
 
-            if early_shifting != False:
-                if acc <= best_acc:
-                    early_shifting -= 1
-                    if early_shifting == 0:
-                        early_shifting = shift_patience
-                        break
-
             if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'loss' and test_loss <= best_loss) or
                 (strategy == 'f1' and f1_score >= best_f1) or
-                (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall) or
-                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
-                 test_loss <= best_loss and precision_score >= best_precision and
-                 recall_score >= best_recall)):
+                (strategy == 'precision' and precision_score >= best_precision) or
+                (strategy == 'recall' and recall_score >= best_recall)):
 
-                current_best_activation = activation_potentiation[j]
                 best_acc = acc
-
+                best_weights = weight_pop[j]
+                final_activations = act_pop[j]
+                best_model = model
+
+                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
                     postfix_dict[f"{data} Batch Accuracy"] = acc
                 progress.set_postfix(postfix_dict)
-
-                final_activations = activations
 
                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')
 
-                best_weights = W
-                best_model = model
-
-                if strategy != 'loss':
-                    if loss == 'categorical_crossentropy':
-                        test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-                    else:
-                        test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                if loss == 'categorical_crossentropy':
+                    test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                else:
+                    test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
 
                 if batch_size == 1:
                     postfix_dict[f"{data} Loss"] = test_loss
@@ -426,9 +403,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
             # Update visualizations during training
             if show_history:
-                depth_list = range(1, len(best_acc_per_depth_list) + 2)
-                update_history_plots_for_learner(viz_objects, depth_list, loss_list + [test_loss],
-                                                 best_acc_per_depth_list + [best_acc], x_train, final_activations)
+                gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                                 best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
             if neurons_history:
                 viz_objects['neurons']['artists'] = (
@@ -496,25 +473,16 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
                 return best_weights, best_model[get_preds()], best_acc, final_activations
 
             progress.update(1)
-            activations = []
 
-        best_activations.append(current_best_activation)
-        best_acc_per_depth_list.append(best_acc)
+        best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
 
-        # Check adaptive strategy conditions
-        if adaptive == True and strategy == 'accuracy' and i + 1 >= patience:
-            check = best_acc_per_depth_list[-patience:]
-            if all(x == check[0] for x in check):
-                strategy = 'loss'
-                adaptive = False
-        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
-            strategy = 'accuracy'
-            adaptive = False
+        weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
-            if best_acc_per_depth_list[i] == best_acc_per_depth_list[i-1]:
+            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
                                        activation_potentiation=final_activations, dtype=dtype)
@@ -548,7 +516,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     else:
         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
 
-    print('\nActivations: ', final_activations)
+    print('\nActivations: ', act_pop[-1])
     print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
     print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
     if data == 'Test':
pyerualjetwork/plan_cuda.py CHANGED
@@ -1,10 +1,10 @@
 # -*- coding: utf-8 -*-
 """
 
-MAIN MODULE FOR PLAN
+MAIN MODULE FOR PLAN_CUDA
 
-PLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PLAN/PLAN.pdf
-ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
 @author: Hasan Can Beydili
 @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -14,6 +14,7 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/
 """
 
 import cupy as cp
+import numpy as np
 from colorama import Fore
 import math
 
@@ -24,6 +25,7 @@ from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
 from .activation_functions_cuda import apply_activation, Softmax, all_activations
 from .metrics_cuda import metrics
 from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
 from .visualizations_cuda import (
     draw_neural_web,
     update_neural_web_for_fit,
@@ -106,8 +108,6 @@ def fit(
         numpyarray([num]): (Weight matrix).
     """
     # Pre-checks
-
-    from memory_operations import transfer_to_gpu, transfer_to_cpu
 
     if train_bar and val:
         train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
@@ -188,45 +188,63 @@ def fit(
     return normalization(LTPW, dtype=dtype)
 
 
-def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', batch_size=1,
+def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
             neural_web_history=False, show_current_activations=False, auto_normalization=True,
-            neurons_history=False, patience=None, depth=None, early_shifting=False,
-            early_stop=False, loss='categorical_crossentropy', show_history=False,
+            neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-            only_this=None, start_this=None, dtype=cp.float32, memory='gpu'):
+            only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=cp.float32, memory='gpu'):
     """
     Optimizes the activation functions for a neural network by leveraging train data to find
     the most accurate combination of activation potentiation for the given dataset.
 
+    Why genetic optimization and not backpropagation?
+    Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
+    Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.
+
     Args:
 
         x_train (array-like): Training input data.
 
         y_train (array-like): Labels for training data.
 
+        optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
+        ```python
+        genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args,
+                                                                        activation_add_prob=0.85,
+                                                                        mutations=False,
+                                                                        strategy='cross_over',
+                                                                        **kwargs)
+
+        model = plan_cuda.learner(x_train,
+                                  y_train,
+                                  optimizer=genetic_optimizer,
+                                  strategy='accuracy',
+                                  show_history=True,
+                                  target_acc=0.94,
+                                  interval=16.67)
+        ```
+
         x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
 
         y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
 
-        strategy (str, optional): Learning strategy. (options: 'accuracy', 'loss', 'f1', 'precision', 'recall', 'adaptive_accuracy', 'adaptive_loss', 'all'): 'accuracy', Maximizes test accuracy during learning. 'f1', Maximizes test f1 score during learning. 'precision', Maximizes test preciison score during learning. 'recall', Maximizes test recall during learning. loss', Minimizes test loss during learning. 'adaptive_accuracy', The model compares the current accuracy with the accuracy from the past based on the number specified by the patience value. If no improvement is observed it adapts to the condition by switching to the 'loss' strategy quickly starts minimizing loss and continues learning. 'adaptive_loss',The model adopts the 'loss' strategy until the loss reaches or falls below the value specified by the patience parameter. However, when the patience threshold is reached, it automatically switches to the 'accuracy' strategy and begins to maximize accuracy. 'all', Maximizes all test scores and minimizes test loss, 'all' strategy most strong and most robust strategy. Default is 'accuracy'.
+        strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
 
         patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
 
-        depth (int, optional): The depth of the PLAN neural networks Aggreagation layers.
+        gen (int, optional): The generation count for genetic optimization.
 
         batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
 
         auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
 
-        early_shifting (int, optional): Early shifting checks if the test accuracy improves after a given number of activation attempts while inside a depth. If there's no improvement, it automatically shifts to the next depth. Basically, if no progress, it’s like, “Alright, let’s move on!” Default is False
-
-        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two depth stops learning.) Default is False.
+        early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
 
         show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
 
         show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
-        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training(if strategy not 'loss'). options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+        loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
         interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
 
@@ -238,8 +256,10 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
         only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
 
-        start_this (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
+        start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
+        start_this_W (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
+
         neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
 
         neural_web_history (bool, optional): Draws history of neural web. Default is False.
@@ -252,18 +272,20 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         tuple: A list for model parameters: [Weight matrix, Preds, Accuracy, [Activations functions]]. You can acces this parameters in model_operations module. For example: model_operations.get_weights() for Weight matrix.
 
     """
-    from memory_operations import transfer_to_gpu, transfer_to_cpu
 
-    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+    print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
     activation_potentiation = all_activations()
 
+    y_train = optimize_labels(y_train, cuda=True)
+
     if x_test is None and y_test is None:
         x_test = x_train
         y_test = y_train
         data = 'Train'
     else:
         data = 'Test'
+        y_test = optimize_labels(y_test, cuda=True)
 
     if memory == 'gpu':
         x_train = transfer_to_gpu(x_train, dtype=dtype)
@@ -272,7 +294,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         x_test = transfer_to_gpu(x_test, dtype=dtype)
         y_test = transfer_to_gpu(y_test, dtype=y_train.dtype)
 
-        from data_operations_cuda import batcher
+        from .data_operations_cuda import batcher
 
     elif memory == 'cpu':
         x_train = transfer_to_cpu(x_train, dtype=dtype)
@@ -281,57 +303,45 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         x_test = transfer_to_cpu(x_test, dtype=dtype)
         y_test = transfer_to_cpu(y_test, dtype=y_train.dtype)
 
-        from data_operations import batcher
+        from .data_operations import batcher
 
     else:
         raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
-
-    if early_shifting != False:
-        shift_patience = early_shifting
-
-    # Strategy initialization
-    if strategy == 'adaptive_accuracy':
-        strategy = 'accuracy'
-        adaptive = True
-        if patience == None:
-            patience = 5
-    elif strategy == 'adaptive_loss':
-        strategy = 'loss'
-        adaptive = True
-        if patience == None:
-            patience = 0.150
-    else:
-        adaptive = False
+    if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
 
     # Filter activation functions
-    if only_this != None:
+    if only_this is not None:
         activation_potentiation = only_this
-    if except_this != None:
+    if except_this is not None:
         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-    if depth is None:
-        depth = len(activation_potentiation)
+    if gen is None:
+        gen = len(activation_potentiation)
 
+    if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+    if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+
     # Initialize visualization components
     viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
 
     # Initialize progress bar
     if batch_size == 1:
-        ncols = 100
+        ncols = 86
     else:
-        ncols = 140
-    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+        ncols = 99
 
     # Initialize variables
-    activations = []
-    if start_this is None:
-        best_activations = []
+    act_pop = []
+    weight_pop = []
+
+    if start_this_act is None and start_this_W is None:
         best_acc = 0
     else:
-        best_activations = start_this
+        act_pop.append(start_this_act)
+        weight_pop.append(start_this_W)
+
     x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-    W = fit(x_train, y_train, activation_potentiation=best_activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
-    model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype, memory=memory)
+    model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
 
     if loss == 'categorical_crossentropy':
         test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
@@ -341,12 +351,15 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     best_acc = model[get_acc()]
     best_loss = test_loss
 
-    best_acc_per_depth_list = []
+    best_acc_per_gen_list = []
     postfix_dict = {}
     loss_list = []
-
-    for i in range(depth):
-        postfix_dict["Depth"] = str(i+1) + '/' + str(depth)
+    target_pop = []
+
+    progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+
+    for i in range(gen):
+        postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
         progress.set_postfix(postfix_dict)
 
         progress.n = 0
@@ -354,79 +367,65 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
         progress.update(0)
 
         for j in range(len(activation_potentiation)):
-            for k in range(len(best_activations)):
-                activations.append(best_activations[k])
-
-            activations.append(activation_potentiation[j])
 
             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-            W = fit(x_train, y_train, activation_potentiation=activations, train_bar=False, auto_normalization=auto_normalization, dtype=dtype, memory=memory)
-            model = evaluate(x_test_batch, y_test_batch, W=W, loading_bar_status=False, activation_potentiation=activations, dtype=dtype, memory=memory)
+
+            if i == 0:
+                act_pop.append(activation_potentiation[j])
+                W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                weight_pop.append(W)
 
-            acc = model[get_acc()]
+            model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
 
-            if strategy == 'loss' or strategy == 'all':
-                if loss == 'categorical_crossentropy':
-                    test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-                else:
-                    test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+            acc = model[get_acc()]
+            if strategy == 'accuracy': target_pop.append(acc)
 
-            if i == 0 and j == 0 and start_this is None:
-                best_loss = test_loss
+            elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
+                precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
 
-            if strategy == 'f1' or strategy == 'precision' or strategy == 'recall' or strategy == 'all':
-                precision_score, recall_score, f1_score = metrics(transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), model[get_preds()])
+                if strategy == 'precision':
+                    target_pop.append(precision_score)
 
-                if strategy == 'precision' or strategy == 'all':
                     if i == 0 and j == 0:
                         best_precision = precision_score
 
-                if strategy == 'recall' or strategy == 'all':
+                if strategy == 'recall':
+                    target_pop.append(recall_score)
+
                     if i == 0 and j == 0:
                         best_recall = recall_score
 
-                if strategy == 'f1' or strategy == 'all':
+                if strategy == 'f1':
+                    target_pop.append(f1_score)
+
                     if i == 0 and j == 0:
                         best_f1 = f1_score
 
-            if early_shifting != False:
-                if acc <= best_acc:
-                    early_shifting -= 1
-                    if early_shifting == 0:
-                        early_shifting = shift_patience
-                        break
-
             if ((strategy == 'accuracy' and acc >= best_acc) or
-                (strategy == 'loss' and test_loss <= best_loss) or
                 (strategy == 'f1' and f1_score >= best_f1) or
                 (strategy == 'precision' and precision_score >= best_precision) or
-                (strategy == 'recall' and recall_score >= best_recall) or
-                (strategy == 'all' and f1_score >= best_f1 and acc >= best_acc and
-                 test_loss <= best_loss and precision_score >= best_precision and
-                 recall_score >= best_recall)):
+                (strategy == 'recall' and recall_score >= best_recall)):
 
-                current_best_activation = activation_potentiation[j]
                 best_acc = acc
-
+                best_weights = weight_pop[j]
+                final_activations = act_pop[j]
+                best_model = model
+
+                final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+
                 if batch_size == 1:
                     postfix_dict[f"{data} Accuracy"] = best_acc
                 else:
                     postfix_dict[f"{data} Batch Accuracy"] = acc
                 progress.set_postfix(postfix_dict)
-
-                final_activations = activations
 
                 if show_current_activations:
                     print(f", Current Activations={final_activations}", end='')
-
-                best_weights = W
-                best_model = model
-
-                if strategy != 'loss':
-                    if loss == 'categorical_crossentropy':
-                        test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-                    else:
-                        test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+
+                if loss == 'categorical_crossentropy':
+                    test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+                else:
+                    test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
 
                 if batch_size == 1:
                     postfix_dict[f"{data} Loss"] = test_loss
@@ -438,9 +437,9 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
             # Update visualizations during training
             if show_history:
-                depth_list = range(1, len(best_acc_per_depth_list) + 2)
-                update_history_plots_for_learner(viz_objects, depth_list, loss_list + [test_loss],
-                                                 best_acc_per_depth_list + [best_acc], x_train, final_activations)
+                gen_list = range(1, len(best_acc_per_gen_list) + 2)
+                update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                                                 best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
             if neurons_history:
                 viz_objects['neurons']['artists'] = (
@@ -508,28 +507,19 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
                 return best_weights, best_model[get_preds()], best_acc, final_activations
 
             progress.update(1)
-            activations = []
 
-        best_activations.append(current_best_activation)
-        best_acc_per_depth_list.append(best_acc)
+        best_acc_per_gen_list.append(best_acc)
         loss_list.append(best_loss)
-
-        # Check adaptive strategy conditions
-        if adaptive == True and strategy == 'accuracy' and i + 1 >= patience:
-            check = best_acc_per_depth_list[-patience:]
-            if all(x == check[0] for x in check):
-                strategy = 'loss'
-                adaptive = False
-        elif adaptive == True and strategy == 'loss' and best_loss <= patience:
-            strategy = 'accuracy'
-            adaptive = False
+
+        weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+        target_pop = []
 
         # Early stopping check
         if early_stop == True and i > 0:
-            if best_acc_per_depth_list[i] == best_acc_per_depth_list[i-1]:
+            if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
                 train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                                       activation_potentiation=final_activations, dtype=dtype)
+                                       activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
                 if loss == 'categorical_crossentropy':
                     train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -553,7 +543,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
     # Final evaluation
     progress.close()
     train_model = evaluate(x_train, y_train, W=best_weights, loading_bar_status=False,
-                           activation_potentiation=final_activations, dtype=dtype)
+                           activation_potentiation=final_activations, dtype=dtype, memory=memory)
 
     if loss == 'categorical_crossentropy':
         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
@@ -652,7 +642,6 @@ def evaluate(
     Returns:
         tuple: Model (list).
     """
-    from memory_operations import transfer_to_cpu, transfer_to_gpu
 
     if memory == 'gpu':
         x_test = transfer_to_gpu(x_test, dtype=dtype)
pyerualjetwork/planeat.py CHANGED
@@ -17,7 +17,7 @@ from tqdm import tqdm
 ### LIBRARY IMPORTS ###
 from .plan import feed_forward
 from .data_operations import normalization
-from .ui import loading_bars
+from .ui import loading_bars, initialize_loading_bar
 from .activation_functions import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
@@ -69,7 +69,7 @@ def define_genomes(input_shape, output_shape, population_size, dtype=np.float32)
     return np.array(population_weights, dtype=dtype), population_activations
 
 
-def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
+def evolve(weights, activation_potentiations, what_gen, fitness, show_info=False, strategy='cross_over', bar_status=True, policy='normal_selective', target_fitness='max', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=np.float32):
     """
     Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
     The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
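
With the renamed `fitness` argument and the new `bar_status`/`target_fitness` keywords, a standalone evolution loop looks roughly like the sketch below. The `define_genomes` and `evolve` signatures are taken from the hunks above; the fitness computation is a toy placeholder to be replaced with your task's own score:

```python
import numpy as np
from pyerualjetwork import planeat

# Population of genomes: weight matrices plus per-genome activation lists.
weights, activations = planeat.define_genomes(input_shape=10, output_shape=2,
                                              population_size=20)

for generation in range(10):
    # Toy fitness: mean absolute weight per genome (stand-in for a real score).
    fitness = np.array([np.mean(np.abs(w)) for w in weights])
    weights, activations = planeat.evolve(weights, activations, generation, fitness,
                                          target_fitness='max', bar_status=True)
```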
@@ -83,8 +83,8 @@ Args:
 
     what_gen (int): The current generation number, used for informational purposes or logging.
 
-    y_reward (numpy.ndarray): A 1D array containing the fitness or reward values of each genome.
-        The array is used to rank the genomes based on their performance. PLANEAT maximizes the reward.
+    fitness (numpy.ndarray): A 1D array containing the fitness values of each genome.
+        The array is used to rank the genomes based on their performance. PLANEAT maximizes or minimizes this fitness for looking 'target' hyperparameter.
 
     show_info (bool, optional): If True, prints information about the current generation and the
         maximum reward obtained. Also shows the current configuration. Default is False.
@@ -96,12 +96,16 @@ Args:
         (PLAN feature, similar to arithmetic crossover but different.)
         Default is 'cross_over'.
 
+    bar_status (bool, optional): Loading bar status during evolving process of genomes. True or False. Default: True
+
     policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:
         - 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
         - 'more_selective': A more selective policy, where fewer bad genes survive.
         - 'less_selective': A less selective policy, where more bad genes survive.
         Default is 'normal_selective'.
 
+    target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'
+
     mutations (bool, optional): If True, mutations are applied to the bad genomes and potentially
         to the best genomes as well. Default is True.
@@ -163,7 +167,7 @@ Returns:
 
 Notes:
     - **Selection Process**:
-        - The genomes are sorted by their fitness (based on `y_reward`), and then split into "best" and "bad" halves.
+        - The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
         - The best genomes are retained, and the bad genomes are modified based on the selected strategy.
 
     - **Crossover and Potentiation Strategies**:
@@ -175,13 +179,13 @@ Notes:
     - `bad_genoms_mutation_prob` determines the probability of applying mutations to the bad genomes.
     - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.
 
-    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `y_reward` has an odd length, an error is raised.
+    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.
 
     - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.
 
 Example:
     ```python
-    weights, activation_potentiations = learner(weights, activation_potentiations, 1, y_reward, info=True, strategy='cross_over', policy='normal_selective')
+    weights, activation_potentiations = planeat.evolve(weights, activation_potentiations, 1, fitness, show_info=True, strategy='cross_over', policy='normal_selective')
     ```
 
     - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
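To make the renamed entry point concrete, a minimal usage sketch of one evolution run follows. It assumes the NumPy variant of `define_genomes` mirrors the CUDA signature shown later in this diff, and `evaluate_genome` is a hypothetical, problem-specific scoring function that is not part of the package:

```python
import numpy as np
from pyerualjetwork import planeat

def evaluate_genome(w, acts):
    # Hypothetical stand-in: score one genome on your own task.
    return float(np.mean(w))

population_size = 100  # must be even, per the check in evolve()
weights, activation_potentiations = planeat.define_genomes(
    input_shape=4, output_shape=2, population_size=population_size)

for gen in range(10):
    fitness = np.array([evaluate_genome(w, a)
                        for w, a in zip(weights, activation_potentiations)])
    weights, activation_potentiations = planeat.evolve(
        weights, activation_potentiations, gen, fitness,
        target_fitness='max', strategy='cross_over', policy='normal_selective')
```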
@@ -219,17 +223,19 @@ Example:
     if not isinstance(activation_mutate_prob, float) or activation_mutate_prob < 0 or activation_mutate_prob > 1:
         raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")
 
-    if len(y_reward) % 2 == 0:
-        slice_center = int(len(y_reward) / 2)
+    if len(fitness) % 2 == 0:
+        slice_center = int(len(fitness) / 2)
 
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
-    sort_indices = np.argsort(y_reward)
+
+    ### FITNESS IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-    ### REWARD LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
+    if target_fitness == 'max': sort_indices = np.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = np.argsort(-fitness)
 
-    y_reward = y_reward[sort_indices]
+    fitness = fitness[sort_indices]
     weights = weights[sort_indices]
 
     activation_potentiations = [activation_potentiations[i] for i in sort_indices]
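The new `target_fitness` switch is implemented entirely in this sort: negating the array before `argsort` reverses the ranking, so the best genome lands at the last index in either mode. A standalone illustration in plain NumPy (not library code):

```python
import numpy as np

fitness = np.array([0.2, 0.9, 0.5, 0.1])

# target_fitness='max': ascending sort puts the highest fitness last
print(np.argsort(fitness))   # [3 0 2 1] -> genome 1 (0.9) ranks best

# target_fitness='min': sorting the negated values flips the order,
# so the lowest fitness takes the "best" (last) slot instead
print(np.argsort(-fitness))  # [1 2 0 3] -> genome 3 (0.1) ranks best
```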
@@ -249,7 +255,9 @@ Example:
 
     bar_format = loading_bars()[0]
 
-    for i in tqdm(range(len(bad_weights)), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰"):
+    if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰")
+
+    for i in range(len(bad_weights)):
 
         if policy == 'normal_selective':
 
@@ -313,7 +321,8 @@ Example:
 
         elif mutation_prob < bad_genoms_mutation_prob:
             bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)
-
+
+        if bar_status: progress.update(1)
 
     weights = np.vstack((bad_weights, best_weights))
     activation_potentiations = bad_activations + best_activations
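The `tqdm`-wrapped loop becomes a plain loop plus an explicitly created bar that `bar_status` can switch off. Below is a minimal sketch of the same pattern using tqdm directly; whether the library's `initialize_loading_bar` helper wraps tqdm this way is an assumption:

```python
from tqdm import tqdm

def evolve_step(bad_genomes, bar_status=True):
    # Create the bar only when requested, instead of wrapping the iterator.
    progress = tqdm(total=len(bad_genomes), desc="GENERATION: 1") if bar_status else None

    for genome in bad_genomes:
        # selection / crossover / mutation would happen here
        if progress is not None:
            progress.update(1)  # advance the bar by one genome

evolve_step(list(range(5)))
```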
@@ -344,9 +353,9 @@ Example:
         print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
         print("*** Performance ***")
-        print("  MAX REWARD: ", str(round(max(y_reward), 2)))
-        print("  MEAN REWARD: ", str(round(np.mean(y_reward), 2)))
-        print("  MIN REWARD: ", str(round(min(y_reward), 2)) + '\n')
+        print("  MAX FITNESS: ", str(round(max(fitness), 2)))
+        print("  MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
+        print("  MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')
 
         print("  BEST GENOME INDEX: ", str(len(weights)-1))
         print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -13,12 +13,12 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/
 import cupy as cp
 import numpy as np
 import random
-from tqdm import tqdm
+
 
 ### LIBRARY IMPORTS ###
 from .plan_cuda import feed_forward
 from .data_operations_cuda import normalization
-from .ui import loading_bars
+from .ui import loading_bars, initialize_loading_bar
 from .activation_functions_cuda import apply_activation, all_activations
 
 def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32):
@@ -70,13 +70,13 @@ def define_genomes(input_shape, output_shape, population_size, dtype=cp.float32)
     return cp.array(population_weights, dtype=dtype), population_activations
 
 
-def evolve(weights, activation_potentiations, what_gen, y_reward, show_info=False, strategy='cross_over', policy='normal_selective', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=cp.float32):
+def evolve(weights, activation_potentiations, what_gen, fitness, show_info=False, strategy='cross_over', bar_status=True, policy='normal_selective', target_fitness='max', mutations=True, bad_genoms_mutation_prob=None, activation_mutate_prob=0.5, save_best_genom=True, cross_over_mode='tpm', activation_add_prob=0.5, activation_delete_prob=0.5, activation_change_prob=0.5, weight_mutate_prob=1, weight_mutate_rate=32, activation_selection_add_prob=0.7, activation_selection_change_prob=0.5, activation_selection_rate=2, dtype=cp.float32):
    """
    Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
    The function modifies the population's weights and activation functions based on a specified policy, mutation probabilities, and strategy.
 
    Args:
-       weights (numpy.ndarray): Array of weights for each genome.
+       weights (cupy.ndarray): Array of weights for each genome.
            (first returned value of define_genomes function)
 
        activation_potentiations (list): A list of activation functions for each genome.
@@ -84,7 +84,7 @@ Args:
 
     what_gen (int): The current generation number, used for informational purposes or logging.
 
-    y_reward (numpy.ndarray): A 1D array containing the fitness or reward values of each genome.
+    fitness (cupy.ndarray): A 1D array containing the fitness or reward values of each genome.
         The array is used to rank the genomes based on their performance. PLANEAT maximizes the reward.
 
     show_info (bool, optional): If True, prints information about the current generation and the
@@ -97,12 +97,16 @@ Args:
         (PLAN feature, similar to arithmetic crossover but different.)
         Default is 'cross_over'.
 
+    bar_status (bool, optional): Whether to display a loading bar while the genomes evolve. Default: True
+
     policy (str, optional): The selection policy that governs how genomes are selected for reproduction. Options:
         - 'normal_selective': Normal selection based on reward, where a portion of the bad genes are discarded.
         - 'more_selective': A more selective policy, where fewer bad genes survive.
         - 'less_selective': A less selective policy, where more bad genes survive.
         Default is 'normal_selective'.
 
+    target_fitness (str, optional): Optimization direction for PLANEAT: 'max' maximizes fitness (machine learning), 'min' minimizes it (machine unlearning). Default: 'max'
+
     mutations (bool, optional): If True, mutations are applied to the bad genomes and potentially
         to the best genomes as well. Default is True.
 
@@ -164,7 +168,7 @@ Returns:
 
 Notes:
     - **Selection Process**:
-        - The genomes are sorted by their fitness (based on `y_reward`), and then split into "best" and "bad" halves.
+        - The genomes are sorted by their fitness (based on `fitness`), and then split into "best" and "bad" halves.
         - The best genomes are retained, and the bad genomes are modified based on the selected strategy.
 
     - **Crossover and Potentiation Strategies**:
@@ -176,13 +180,13 @@ Notes:
     - `bad_genoms_mutation_prob` determines the probability of applying mutations to the bad genomes.
     - If `activation_mutate_prob` is provided, activation function mutations are applied to the genomes based on this probability.
 
-    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `y_reward` has an odd length, an error is raised.
+    - **Population Size**: The population size must be an even number to properly split the best and bad genomes. If `fitness` has an odd length, an error is raised.
 
     - **Logging**: If `show_info=True`, the current generation and the maximum reward from the population are printed for tracking the learning progress.
 
 Example:
     ```python
-    weights, activation_potentiations = learner(weights, activation_potentiations, 1, y_reward, info=True, strategy='cross_over', policy='normal_selective')
+    weights, activation_potentiations = planeat.evolve(weights, activation_potentiations, 1, fitness, show_info=True, strategy='cross_over', policy='normal_selective')
     ```
 
     - The function returns the updated weights and activations after processing based on the chosen strategy, policy, and mutation parameters.
@@ -220,20 +224,21 @@ Example:
     if not isinstance(activation_mutate_prob, float) or activation_mutate_prob < 0 or activation_mutate_prob > 1:
         raise ValueError("activation_mutate_prob parameter must be float and 0-1 range")
 
-    if len(y_reward) % 2 == 0:
-        slice_center = int(len(y_reward) / 2)
+    if len(fitness) % 2 == 0:
+        slice_center = int(len(fitness) / 2)
 
     else:
         raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")
 
-    sort_indices = cp.argsort(y_reward)
+    ### FITNESS LIST IS SORTED IN ASCENDING (OR DESCENDING) ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
 
-    ### REWARD LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:
+    if target_fitness == 'max': sort_indices = cp.argsort(fitness)
+    elif target_fitness == 'min': sort_indices = cp.argsort(-fitness)
 
-    y_reward = y_reward[sort_indices]
+    fitness = fitness[sort_indices]
     weights = weights[sort_indices]
 
-    activation_potentiations = [activation_potentiations[i] for i in sort_indices]
+    activation_potentiations = [activation_potentiations[int(i)] for i in sort_indices]
 
     ### GENOMES ARE DIVIDED INTO TWO GROUPS: GOOD GENOMES AND BAD GENOMES:
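The added `int(i)` matters on the GPU path: `cp.argsort` returns a CuPy array, and iterating it yields 0-d device arrays that cannot index a Python list directly, so each index is pulled to the host first (a rationale inferred from this change, not stated in the release). A small illustration:

```python
import cupy as cp

acts = ['relu', 'tanh', 'sigmoid']  # a plain Python list, as in planeat
sort_indices = cp.argsort(cp.array([0.5, 0.1, 0.9]))

# int() copies each 0-d CuPy index to the host so it can index the list.
ordered = [acts[int(i)] for i in sort_indices]
print(ordered)  # ['tanh', 'relu', 'sigmoid']
```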
@@ -250,7 +255,9 @@ Example:
 
     bar_format = loading_bars()[0]
 
-    for i in tqdm(range(len(bad_weights)), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰"):
+    if bar_status: progress = initialize_loading_bar(len(bad_weights), desc="GENERATION: " + str(what_gen), bar_format=bar_format, ncols=50, ascii="▱▰")
+
+    for i in range(len(bad_weights)):
 
         if policy == 'normal_selective':
 
@@ -315,6 +322,7 @@ Example:
         elif mutation_prob < bad_genoms_mutation_prob:
             bad_weights[i], bad_activations[i] = mutation(bad_weights[i], bad_activations[i], activation_mutate_prob=activation_mutate_prob, activation_add_prob=activation_add_prob, activation_delete_prob=activation_delete_prob, activation_change_prob=activation_change_prob, weight_mutate_prob=weight_mutate_prob, threshold=weight_mutate_rate, dtype=dtype)
 
+        if bar_status: progress.update(1)
 
     weights = cp.vstack((bad_weights, best_weights))
     activation_potentiations = bad_activations + best_activations
@@ -345,9 +353,9 @@ Example:
         print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')
 
         print("*** Performance ***")
-        print("  MAX REWARD: ", str(round(max(y_reward), 2)))
-        print("  MEAN REWARD: ", str(round(cp.mean(y_reward), 2)))
-        print("  MIN REWARD: ", str(round(min(y_reward), 2)) + '\n')
+        print("  MAX REWARD: ", str(cp.round(max(fitness), 2)))
+        print("  MEAN REWARD: ", str(cp.round(cp.mean(fitness), 2)))
+        print("  MIN REWARD: ", str(cp.round(min(fitness), 2)) + '\n')
 
         print("  BEST GENOME INDEX: ", str(len(weights)-1))
         print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
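Note the switch from the built-in `round()` to `cp.round()` in these prints: `max(fitness)` and `cp.mean(fitness)` yield 0-d CuPy arrays, which the GPU-side `cp.round` handles directly. The rationale is inferred from the change itself; a brief sketch:

```python
import cupy as cp

fitness = cp.array([0.456, 0.789])
mean_fit = cp.mean(fitness)   # 0-d CuPy array, stays on the GPU

# cp.round operates on device arrays without a host round-trip.
print(cp.round(mean_fit, 2))  # 0.62
```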
@@ -318,10 +318,12 @@ def draw_activations(x_train, activation):
 
     elif activation == 'spiral':
         result = af.spiral_activation(x_train)
-
-    return result
-
-
+
+    try: return result
+    except:
+        print('WARNING: error in drawing some activation.')
+        return x_train
+
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
 
     from .metrics import metrics, confusion_matrix, roc_curve
@@ -321,7 +321,10 @@ def draw_activations(x_train, activation):
     elif activation == 'spiral':
         result = af.spiral_activation(x_train)
 
-    return result
+    try: return result
+    except:
+        print('WARNING: error in drawing some activation.')
+        return x_train
 
 
 def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation):
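In both visualization modules, `draw_activations` now wraps its return in a try/except: if `activation` matches no branch, `result` is never assigned, and the bare `return result` raises `NameError`, which the new code turns into a warning plus an identity fallback. A compressed, hypothetical sketch of that control flow (the library has many more branches and uses a bare `except:`):

```python
import numpy as np

def draw_activation_safe(x, activation):
    if activation == 'tanh':  # one representative branch of many
        result = np.tanh(x)

    try:
        return result          # NameError if no branch assigned it
    except NameError:
        print('WARNING: error in drawing some activation.')
        return x               # fall back to the unmodified input

print(draw_activation_safe(np.array([0.5]), 'unknown'))  # [0.5]
```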
@@ -355,7 +358,7 @@ def plot_evaluate(x_test, y_test, y_preds, acc_list, W, activation_potentiation)
     fpr, tpr, thresholds = roc_curve(y_true, y_preds)
 
     roc_auc = cp.trapz(tpr, fpr)
-    axs[1, 0].plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
+    axs[1, 0].plot(fpr.get(), tpr.get(), color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
     axs[1, 0].plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
     axs[1, 0].set_xlim([0.0, 1.0])
     axs[1, 0].set_ylim([0.0, 1.05])
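The `.get()` calls are the standard CuPy device-to-host copy: Matplotlib draws from host (NumPy) data, so GPU arrays must be transferred before plotting. A minimal sketch:

```python
import cupy as cp
import matplotlib.pyplot as plt

fpr = cp.linspace(0.0, 1.0, 50)  # device (GPU) arrays
tpr = cp.sqrt(fpr)

# ndarray.get() copies device data into a NumPy array on the host,
# which is exactly what the fpr.get() / tpr.get() change does.
plt.plot(fpr.get(), tpr.get())
plt.savefig('roc_sketch.png')
```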
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
-Version: 4.1.7
+Version: 4.1.8
 Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -1,4 +1,4 @@
-pyerualjetwork/__init__.py,sha256=OjubIysSQiH969c2bNU3ssgHdyFk1VYOF1D-dWNn_JQ,2450
+pyerualjetwork/__init__.py,sha256=uPTVoRI0TyHt86XYQOHZcvtk1faqxuocKknYhnKUZbY,2175
 pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
 pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
 pyerualjetwork/data_operations.py,sha256=ZM24BuPsIAtI0a_Exr4HgCjmlb285wEeO8juFY9sJr0,14680
@@ -6,19 +6,19 @@ pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvt
 pyerualjetwork/help.py,sha256=OZghUy7GZTgEX_i3NYtgcpzUgCDOi6r2vVUF1ROkFiI,774
 pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
 pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
-pyerualjetwork/memory_operations.py,sha256=TTowQVOa3whwphz9y7ed1ZdVyXjfRDbX1b12cpCTNY0,13471
+pyerualjetwork/memory_operations.py,sha256=_Wu9FJc6ozQTPOC2tXfXWPCwUIvPRuDjmLw_McntVSI,13470
 pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
 pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
 pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
 pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
-pyerualjetwork/plan.py,sha256=ZadbCULBnfd8yrE21-shzifnILzQPZ9jEy6amQxuuvw,35251
-pyerualjetwork/plan_cuda.py,sha256=y1YoZQCSXGyLduG-IdcSPk2DPMAYG5G2pOfDefRZw0w,36287
-pyerualjetwork/planeat.py,sha256=oUTIW9ykPd_d0YJ3-zfforD9gXAQTNx-ethXP7ByLBQ,39621
-pyerualjetwork/planeat_cuda.py,sha256=oFCa6hKLVkihiSQzxyvkwZYwbk4pGhy7yPeNuUd-YI8,39678
+pyerualjetwork/plan.py,sha256=5znxbnGO-3aL_SRDMRNNrDPJs_6hgU8yGiFnwPy69EY,34320
+pyerualjetwork/plan_cuda.py,sha256=KoKjsoWTLM-q07G1Gy0-LYXGlp15Fno6JqHz-Jzi_yE,35983
+pyerualjetwork/planeat.py,sha256=VtWtWndbKoFNYTWd1EsyKBV4Vp5U6cc7uWDgQ4WjHqo,40248
+pyerualjetwork/planeat_cuda.py,sha256=fSn28ZbxctPvBjpKgtv_uGwwUdTEXkBizy76mMlZYJ0,40237
 pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
-pyerualjetwork/visualizations.py,sha256=9naPYMQKpkMcP_GEaBK90FEZAlImT_f-lgRqVCwvcb8,28660
-pyerualjetwork/visualizations_cuda.py,sha256=blOM-VQnAT_qzM3i_OWjL5C1qnUtYctEvja-a_X4Z0w,29085
-pyerualjetwork-4.1.7.dist-info/METADATA,sha256=-Netx9Dfim4DWMg5bQ3nl6PAXizHku4b93id2OCD0H4,7793
-pyerualjetwork-4.1.7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
-pyerualjetwork-4.1.7.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
-pyerualjetwork-4.1.7.dist-info/RECORD,,
+pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
+pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
+pyerualjetwork-4.1.8.dist-info/METADATA,sha256=ZhjLwRdVpV6yuxJD4-ExuvsLeqo-EtSVdfqVqWw7wKs,7793
+pyerualjetwork-4.1.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pyerualjetwork-4.1.8.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+pyerualjetwork-4.1.8.dist-info/RECORD,,