pyerualjetwork 4.2.1__py3-none-any.whl → 4.2.2b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,11 @@
- __version__ = "4.2.1"
+ __version__ = "4.2.2b1"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
      print(f"PyerualJetwork Version {__version__}" + '\n')

  def print_update_notes(__update__):
-     print(f"Update Notes:\n{__update__}")
+     print(f"Notes:\n{__update__}")

  print_version(__version__)
  print_update_notes(__update__)
@@ -0,0 +1,171 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Thu Jun 20 03:55:15 2024
+
+ @author: hasan
+ """
+
+ import numpy as np
+ from colorama import Fore
+ from pyerualjetwork import plan, planeat, data_operations, model_operations
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.ensemble import RandomForestClassifier
+ import xgboost as xgb
+ import tensorflow as tf
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import Dense
+ from tensorflow.keras.optimizers import RMSprop
+ from sklearn.metrics import classification_report, accuracy_score
+ from sklearn.datasets import make_blobs
+ from matplotlib import pyplot as plt
+
+ fig, ax = plt.subplots(2, 3)  # Create a new figure and axes
+
+ # Decision boundary plotting
+ def plot_decision_boundary(x, y, model, feature_indices=[0, 1], h=0.02, model_name='str', ax=None, which_ax1=None, which_ax2=None, W=None, activation_potentiation=None):
+     """
+     Plot decision boundary by focusing on specific feature indices.
+
+     Parameters:
+     - x: Input data
+     - y: Target labels
+     - model: Trained model
+     - feature_indices: Indices of the features to plot (default: [0, 1])
+     - h: Step size for the mesh grid
+     """
+     x_min, x_max = x[:, feature_indices[0]].min() - 1, x[:, feature_indices[0]].max() + 1
+     y_min, y_max = x[:, feature_indices[1]].min() - 1, x[:, feature_indices[1]].max() + 1
+     xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
+                          np.arange(y_min, y_max, h))
+
+     grid = np.c_[xx.ravel(), yy.ravel()]
+
+     # Create a full grid with zeros for non-selected features
+     grid_full = np.zeros((grid.shape[0], x.shape[1]))
+     grid_full[:, feature_indices] = grid
+
+     if model == 'PLAN':
+
+         Z = [None] * len(grid_full)
+
+         for i in range(len(grid_full)):
+             Z[i] = np.argmax(model_operations.predict_model_ram(grid_full[i], W=W, activation_potentiation=activation_potentiation))
+
+         Z = np.array(Z)
+         Z = Z.reshape(xx.shape)
+
+     else:
+
+         # Predict on the grid
+         # for i in range(len(grid_full)): grid_full[i] = activation_functions.apply_activation(grid_full[i], ['tanh', 'relu', 'elu', 'selu'])
+         Z = model.predict(grid_full)
+
+         if model_name == 'Deep Learning':
+             Z = np.argmax(Z, axis=1)  # Get class predictions
+
+         Z = Z.reshape(xx.shape)
+
+     # Plot decision boundary
+     ax[which_ax1, which_ax2].contourf(xx, yy, Z, alpha=0.8)
+     ax[which_ax1, which_ax2].scatter(x[:, feature_indices[0]], x[:, feature_indices[1]], c=np.argmax(y, axis=1), edgecolors='k', marker='o', s=20, alpha=0.9)
+     ax[which_ax1, which_ax2].set_xlabel(f'Feature {feature_indices[0] + 1}')
+     ax[which_ax1, which_ax2].set_ylabel(f'Feature {feature_indices[1] + 1}')
+     ax[which_ax1, which_ax2].set_title(model_name)
+
+
+ X, y = make_blobs(n_samples=1000, centers=5, random_state=42)
+
+ # Splitting training, test and validation data
+ x_train, x_test, y_train, y_test = data_operations.split(X, y, test_size=0.4, random_state=42)  # For less train data use this: (X, y, test_size=0.9, random_state=42)
+
+ # One-hot encoding
+ y_train, y_test = data_operations.encode_one_hot(y_train, y_test)
+
+ # Automatically balance any class imbalance
+ x_train, y_train = data_operations.auto_balancer(x_train, y_train)
+ scaler_params, x_train, x_test = data_operations.standard_scaler(x_train, x_test)
+
+ # Logistic Regression model
+ print(Fore.YELLOW + "------Logistic Regression Results------" + Fore.RESET)
+ lr_model = LogisticRegression(max_iter=1000, random_state=42)
+ y_train_decoded = data_operations.decode_one_hot(y_train)
+ lr_model.fit(x_train, y_train_decoded)
+
+ y_test_decoded = plan.decode_one_hot(y_test)
+ y_pred_lr = lr_model.predict(x_test)
+ test_acc_lr = accuracy_score(y_test_decoded, y_pred_lr)
+ print(f"Logistic Regression Test Accuracy: {test_acc_lr:.4f}")
+ print(classification_report(y_test_decoded, y_pred_lr))
+ # Visualize the decision boundary
+ plot_decision_boundary(x_test, y_test, lr_model, feature_indices=[0, 1], model_name='Logistic Regression', ax=ax, which_ax1=0, which_ax2=0)
+
+ # Random Forest model
+ print(Fore.CYAN + "------Random Forest Results------" + Fore.RESET)
+ rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
+ rf_model.fit(x_train, y_train_decoded)
+ y_pred_rf = rf_model.predict(x_test)
+ test_acc_rf = accuracy_score(y_test_decoded, y_pred_rf)
+ print(f"Random Forest Test Accuracy: {test_acc_rf:.4f}")
+ print(classification_report(y_test_decoded, y_pred_rf))
+ # Visualize the decision boundary
+ plot_decision_boundary(x_test, y_test, rf_model, feature_indices=[0, 1], model_name='Random Forest', ax=ax, which_ax1=0, which_ax2=1)
+
+ # XGBoost model
+ print(Fore.MAGENTA + "------XGBoost Results------" + Fore.RESET)
+ xgb_model = xgb.XGBClassifier(use_label_encoder=False, eval_metric='mlogloss', random_state=42)
+ xgb_model.fit(x_train, y_train_decoded)
+ y_pred_xgb = xgb_model.predict(x_test)
+ test_acc_xgb = accuracy_score(y_test_decoded, y_pred_xgb)
+ print(f"XGBoost Test Accuracy: {test_acc_xgb:.4f}")
+ print(classification_report(y_test_decoded, y_pred_xgb))
+ # Visualize the decision boundary
+ plot_decision_boundary(x_test, y_test, xgb_model, feature_indices=[0, 1], model_name='XGBoost', ax=ax, which_ax1=0, which_ax2=2)
+
+ # Deep Learning model (artificial neural network)
+
+ input_dim = x_train.shape[1]  # Input dimension
+
+ model = Sequential()
+ model.add(Dense(32, activation='relu', input_dim=input_dim))
+ model.add(Dense(64, activation='tanh'))
+ model.add(Dense(32, activation='relu'))
+ model.add(Dense(64, activation='tanh'))
+ model.add(Dense(y_train.shape[1], activation='softmax'))
+
+ # Compile the model
+ model.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
+
+ # Train the model (with early stopping)
+ early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=10, restore_best_weights=True)
+ model.fit(x_train, y_train, epochs=5, batch_size=32, callbacks=[early_stop], verbose=1)
+
+ # Evaluate the model's performance on the test data
+ y_pred_dl = model.predict(x_test)
+ y_pred_dl_classes = np.argmax(y_pred_dl, axis=1)  # Predicted classes
+ y_test_decoded_dl = plan.decode_one_hot(y_test)
+ print(Fore.BLUE + "------Deep Learning (ANN) Results------" + Fore.RESET)
+ test_acc_dl = accuracy_score(y_test_decoded_dl, y_pred_dl_classes)
+ print(f"Deep Learning Test Accuracy: {test_acc_dl:.4f}")
+ print(classification_report(y_test_decoded_dl, y_pred_dl_classes))
+ # Visualize the decision boundary
+ plot_decision_boundary(x_test, y_test, model=model, feature_indices=[0, 1], model_name='Deep Learning', ax=ax, which_ax1=1, which_ax2=0)
+
+ # PLAN model
+ # Configuring the optimizer
+ genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args, **kwargs)
+
+ model = plan.learner(x_train, y_train, genetic_optimizer, batch_size=0.05, gen=15)  # learner function = TFL (Test Feedback Learning). If test parameters are not given, train feedback is used. More information: https://github.com/HCB06/pyerualjetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
+
+ W = model[model_operations.get_weights()]
+ activation_potentiation = model[model_operations.get_act_pot()]
+
+ test_model = plan.evaluate(x_test, y_test, W=W, activation_potentiation=activation_potentiation)
+ test_acc_plan = test_model[model_operations.get_acc()]
+ print(Fore.GREEN + "------PLAN Model Results------" + Fore.RESET)
+ print(f"PLAN Test Accuracy: {test_acc_plan:.4f}")
+ print(classification_report(plan.decode_one_hot(y_test), test_model[model_operations.get_preds()]))
+ # Visualize the decision boundary
+ plot_decision_boundary(x_test, y_test, model='PLAN', feature_indices=[0, 1], model_name='PLAN', ax=ax, which_ax1=1, which_ax2=1, W=W, activation_potentiation=activation_potentiation)
+ plt.show()
@@ -334,8 +334,8 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li
      ndarray: Output from the model.
      """

-     from .data_operations import standard_scaler
-     from .plan import feed_forward
+     from data_operations import standard_scaler
+     from plan import feed_forward

      Input = standard_scaler(None, Input, scaler_params, dtype=dtype)
pyerualjetwork/plan.py CHANGED
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  """

  import numpy as np
- from colorama import Fore
  import math

  ### LIBRARY IMPORTS ###
@@ -174,11 +173,11 @@ def fit(
      return normalization(LTPW, dtype=dtype)


- def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
-             neural_web_history=False, show_current_activations=False, auto_normalization=True,
+ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
+             neural_web_history=False, show_current_activations=False,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-             only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=np.float32):
+             interval=33.33, target_acc=None, target_loss=None,
+             start_this_act=None, start_this_W=None, dtype=np.float32):
      """
      Optimizes the activation functions for a neural network by leveraging train data to find
      the most accurate combination of activation potentiation for the given dataset, using the genetic algorithm NEAT (NeuroEvolution of Augmenting Topologies) modified for PLAN: PLANEAT.
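For callers, the signature change above means 4.2.1-style invocations that pass the removed keywords (x_test, y_test, except_this, only_this, auto_normalization, target_fitness) will now raise a TypeError. A minimal before/after sketch; the data variables and optimizer are the same ones used in the docstring example:

    # 4.2.1 style -- no longer accepted by 4.2.2b1:
    # model = plan.learner(x_train, y_train, genetic_optimizer,
    #                      x_test=x_test, y_test=y_test,
    #                      except_this=['circular', 'spiral'])

    # 4.2.2b1 style -- feedback always comes from (batches of) the train data:
    model = plan.learner(x_train, y_train, genetic_optimizer,
                         strategy='accuracy',  # or 'f1' / 'precision' / 'recall'
                         gen=15,
                         batch_size=0.05)      # each feedback batch = 5% of the train set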
@@ -206,23 +205,18 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                                optimizer=genetic_optimizer,
                                strategy='accuracy',
                                show_history=True,
-                               target_acc=0.94,
+                               gen=15,
+                               batch_size=0.05,
                                interval=16.67)
      ```

-     x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-     y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
-
      strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.

      gen (int, optional): The generation count for genetic optimization.

-     batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
-
-     auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
+     batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set thanks to the random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1 (100% of the train set).

-     early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
+     early_stop (bool, optional): If True, implements early stopping during training (if accuracy does not improve for two generations, learning stops). Default is False.

      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False.
@@ -235,10 +229,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

      target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-     except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-     only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())

      start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None.
@@ -247,8 +237,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      neurons_history (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.

      neural_web_history (bool, optional): Draws history of neural web. Default is False.
-
-     target_fitness (str, optional): Target fitness strategy for PLANEAT optimization. ('max' for machine learning, 'min' for machine unlearning.) Default: 'max'

      dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!] (optional)

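The fractional batch_size described in the docstring is easy to restate standalone. A minimal NumPy sketch of the documented semantics; fractional_batch is an illustrative stand-in for the library's internal batcher, not its actual implementation:

    import numpy as np

    def fractional_batch(x, y, batch_size=1.0, rng=None):
        # batch_size=0.08 -> a random ~8% subset, as the docstring describes
        rng = rng or np.random.default_rng()
        n = max(1, int(len(x) * batch_size))
        idx = rng.choice(len(x), size=n, replace=False)
        return x[idx], y[idx]

    # A fresh sample is drawn for every genome evaluation, so over many
    # generations the whole train set is covered probabilistically:
    # x_batch, y_batch = fractional_batch(x_train, y_train, batch_size=0.05)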
@@ -257,37 +245,23 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc

      """

-     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+     from .planeat import define_genomes
+
+     data = 'Train'

-     activation_potentiation = all_activations()
+     activation_potentiation_len = len(all_activations())

      # Pre-checks

      x_train = x_train.astype(dtype, copy=False)
      y_train = optimize_labels(y_train, cuda=False)

-     if x_test is None and y_test is None:
-         x_test = x_train
-         y_test = y_train
-         data = 'Train'
-     else:
-         x_test = x_test.astype(dtype, copy=False)
-         y_test = optimize_labels(y_test, cuda=False)
-         data = 'Test'
-
-     # Filter activation functions
-     if only_this is not None:
-         activation_potentiation = only_this
-     if except_this is not None:
-         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
      if gen is None:
-         gen = len(activation_potentiation)
+         gen = activation_potentiation_len

      if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
+     if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')

-     if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-     if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-
      # Initialize visualization components
      viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
@@ -300,31 +274,23 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      # Initialize variables
      act_pop = []
      weight_pop = []
-
-     if start_this_act is None and start_this_W is None:
-         best_acc = 0
-     else:
-         act_pop.append(start_this_act)
-         weight_pop.append(start_this_W)
-
-         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-         model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype)
-
-         if loss == 'categorical_crossentropy':
-             test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-         else:
-             test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
-
-         best_acc = model[get_acc()]
-         best_loss = test_loss

+     best_acc = 0
+     best_f1 = 0
+     best_recall = 0
+     best_precision = 0
      best_acc_per_gen_list = []
      postfix_dict = {}
      loss_list = []
      target_pop = []

-     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+
+     if start_this_act is not None and start_this_W is not None:
+         weight_pop[0] = start_this_W
+         act_pop[0] = start_this_act

      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -334,22 +300,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
          progress.last_print_n = 0
          progress.update(0)

-         for j in range(len(activation_potentiation)):
-
-             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-             if i == 0:
-                 act_pop.append(activation_potentiation[j])
-                 W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                 weight_pop.append(W)
+         for j in range(activation_potentiation_len):

-             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
+             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
              acc = model[get_acc()]

              if strategy == 'accuracy': target_pop.append(acc)

              elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                 precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+                 precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])

                  if strategy == 'precision':
                      target_pop.append(precision_score)
@@ -391,22 +351,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print(f", Current Activations={final_activations}", end='')

                  if loss == 'categorical_crossentropy':
-                     test_loss = categorical_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                     train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
                  else:
-                     test_loss = binary_crossentropy(y_true_batch=y_test_batch, y_pred_batch=model[get_preds_softmax()])
+                     train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])

                  if batch_size == 1:
-                     postfix_dict[f"{data} Loss"] = test_loss
-                     best_loss = test_loss
+                     postfix_dict[f"{data} Loss"] = train_loss
+                     best_loss = train_loss
                  else:
-                     postfix_dict[f"{data} Batch Loss"] = test_loss
+                     postfix_dict[f"{data} Batch Loss"] = train_loss
                  progress.set_postfix(postfix_dict)
-                 best_loss = test_loss
+                 best_loss = train_loss

                  # Update visualizations during training
                  if show_history:
                      gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                       best_acc_per_gen_list + [best_acc], x_train, final_activations)
@@ -415,7 +375,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                          y_train[0], viz_objects['neurons']['artists'],
                          data=data, fig1=viz_objects['neurons']['fig'],
-                         acc=best_acc, loss=test_loss)
+                         acc=best_acc, loss=train_loss)
                  )

                  if neural_web_history:
@@ -440,13 +400,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print('\nActivations: ', final_activations)
                      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                     if data == 'Test':
-                         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                      # Display final visualizations
                      display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                        test_loss, y_train, interval)
+                                                        train_loss, y_train, interval)
                      return best_weights, best_model[get_preds()], best_acc, final_activations

                  # Check target loss
@@ -465,13 +422,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print('\nActivations: ', final_activations)
                      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                     if data == 'Test':
-                         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                      # Display final visualizations
                      display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                        test_loss, y_train, interval)
+                                                        train_loss, y_train, interval)
                      return best_weights, best_model[get_preds()], best_acc, final_activations

              progress.update(1)
@@ -479,7 +433,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
          best_acc_per_gen_list.append(best_acc)
          loss_list.append(best_loss)

-         weight_pop, act_pop = optimizer(np.array(weight_pop, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+         weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
          target_pop = []

          # Early stopping check
@@ -499,13 +453,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                  print('\nActivations: ', final_activations)
                  print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                  print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                 if data == 'Test':
-                     print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                     print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                  # Display final visualizations
                  display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    test_loss, y_train, interval)
+                                                    train_loss, y_train, interval)
                  return best_weights, best_model[get_preds()], best_acc, final_activations

      # Final evaluation
@@ -518,15 +469,12 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      else:
          train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])

-     print('\nActivations: ', act_pop[-1])
+     print('\nActivations: ', final_activations)
      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-     if data == 'Test':
-         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

      # Display final visualizations
-     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
+     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
      return best_weights, best_model[get_preds()], best_acc, final_activations

pyerualjetwork/plan_cuda.py CHANGED
@@ -16,7 +16,6 @@ PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  """

  import cupy as cp
- from colorama import Fore
  import math

  ### LIBRARY IMPORTS ###
@@ -189,11 +188,11 @@ def fit(
      return normalization(LTPW, dtype=dtype)


- def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='accuracy', gen=None, batch_size=1,
-             neural_web_history=False, show_current_activations=False, auto_normalization=True,
+ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
+             neural_web_history=False, show_current_activations=False,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-             interval=33.33, target_acc=None, target_loss=None, except_this=None,
-             only_this=None, start_this_act=None, start_this_W=None, target_fitness='max', dtype=cp.float32, memory='gpu'):
+             interval=33.33, target_acc=None, target_loss=None,
+             start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
      """
      Optimizes the activation functions for a neural network by leveraging train data to find
      the most accurate combination of activation potentiation for the given dataset.
@@ -217,29 +216,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                                **kwargs)

      model = plan_cuda.learner(x_train,
-                  y_train,
-                  optimizer=genetic_optimizer,
-                  strategy='accuracy',
-                  show_history=True,
-                  target_acc=0.94,
-                  interval=16.67)
+                               y_train,
+                               optimizer=genetic_optimizer,
+                               strategy='accuracy',
+                               show_history=True,
+                               gen=15,
+                               batch_size=0.05,
+                               interval=16.67)
      ```

-     x_test (array-like, optional): Test input data (for improve next gen generilization). If test data is not given then train feedback learning active
-
-     y_test (array-like, optional): Test Labels (for improve next gen generilization). If test data is not given then train feedback learning active
-
      strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.

-     patience ((int, float), optional): patience value for adaptive strategies. For 'adaptive_accuracy' Default value: 5. For 'adaptive_loss' Default value: 0.150.
-
      gen (int, optional): The generation count for genetic optimization.

-     batch_size (float, optional): Batch size is used in the prediction process to receive test feedback by dividing the test data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each test batch represents 8% of the test set. Default is 1. (%100 of test)
+     batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set thanks to the random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1 (100% of the train set).

-     auto_normalization (bool, optional): If auto normalization=False this makes more faster training times and much better accuracy performance for some datasets. Default is True.
-
-     early_stop (bool, optional): If True, implements early stopping during training.(If test accuracy not improves in two gen stops learning.) Default is False.
+     early_stop (bool, optional): If True, implements early stopping during training (if train accuracy does not improve for two generations, learning stops). Default is False.

      show_current_activations (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False.

@@ -252,10 +244,6 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

      target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
-     except_this (list, optional): A list of activations to exclude from optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())
-
-     only_this (list, optional): A list of activations to focus on during optimization. Default is None. (For avaliable activation functions, run this code: plan.activations_list())

      start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None.

@@ -274,20 +262,14 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc

      """

-     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+     from .planeat_cuda import define_genomes

-     activation_potentiation = all_activations()
+     data = 'Train'
+
+     activation_potentiation_len = len(all_activations())

      y_train = optimize_labels(y_train, cuda=True)

-     if x_test is None and y_test is None:
-         x_test = x_train
-         y_test = y_train
-         data = 'Train'
-     else:
-         data = 'Test'
-         y_test = optimize_labels(y_test, cuda=True)
-
      if memory == 'gpu':
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -310,17 +292,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
          raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

      if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
-
-     # Filter activation functions
-     if only_this is not None:
-         activation_potentiation = only_this
-     if except_this is not None:
-         activation_potentiation = [item for item in activation_potentiation if item not in except_this]
-     if gen is None:
-         gen = len(activation_potentiation)
-
-     if start_this_act is None and len(activation_potentiation) % 2 != 0: raise ValueError("Activation length must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
-     if start_this_act is not None and len(activation_potentiation) + 1 % 2 != 0: raise ValueError("You are using start_this parameter, activation length still must be even number. Please use 'except_this' parameter and except some activation. For example: except_this=['linear']")
+     if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')

      # Initialize visualization components
      viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
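A usage note on the memory keyword that this cleanup leaves in place: only 'gpu' and 'cpu' are accepted (anything else hits the ValueError above, and 'gpu' routes arrays through transfer_to_gpu). A hedged sketch; the argument values are illustrative:

    # Default: train arrays are moved to the GPU -- fastest when they fit in VRAM.
    model = plan_cuda.learner(x_train, y_train, genetic_optimizer,
                              gen=15, batch_size=0.05, memory='gpu')

    # Host-memory fallback for datasets larger than VRAM:
    model = plan_cuda.learner(x_train, y_train, genetic_optimizer,
                              gen=15, batch_size=0.05, memory='cpu')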
@@ -335,29 +307,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      act_pop = []
      weight_pop = []

-     if start_this_act is None and start_this_W is None:
-         best_acc = 0
-     else:
-         act_pop.append(start_this_act)
-         weight_pop.append(start_this_W)
-
-         x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-         model = evaluate(x_test_batch, y_test_batch, W=start_this_W, loading_bar_status=False, activation_potentiation=act_pop, dtype=dtype, memory=memory)
-
-         if loss == 'categorical_crossentropy':
-             test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-         else:
-             test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-
-         best_acc = model[get_acc()]
-         best_loss = test_loss
-
+     best_acc = 0
+     best_f1 = 0
+     best_recall = 0
+     best_precision = 0
      best_acc_per_gen_list = []
      postfix_dict = {}
      loss_list = []
      target_pop = []

-     progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
+     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+
+     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+
+     if start_this_act is not None and start_this_W is not None:
+         weight_pop[0] = start_this_W
+         act_pop[0] = start_this_act

      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -367,22 +332,16 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
          progress.last_print_n = 0
          progress.update(0)

-         for j in range(len(activation_potentiation)):
-
-             x_test_batch, y_test_batch = batcher(x_test, y_test, batch_size=batch_size)
-
-             if i == 0:
-                 act_pop.append(activation_potentiation[j])
-                 W = fit(x_train, y_train, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
-                 weight_pop.append(W)
-
-             model = evaluate(x_test_batch, y_test_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
+         for j in range(activation_potentiation_len):

+             x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+             model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
              acc = model[get_acc()]
+
              if strategy == 'accuracy': target_pop.append(acc)

              elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                 precision_score, recall_score, f1_score = metrics(y_test_batch, model[get_preds()])
+                 precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])

                  if strategy == 'precision':
                      target_pop.append(precision_score)
@@ -424,22 +383,22 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print(f", Current Activations={final_activations}", end='')

                  if loss == 'categorical_crossentropy':
-                     test_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+                     train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
                  else:
-                     test_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_test_batch, dtype=y_test_batch.dtype), y_pred_batch=model[get_preds_softmax()])
+                     train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])

                  if batch_size == 1:
-                     postfix_dict[f"{data} Loss"] = test_loss
-                     best_loss = test_loss
+                     postfix_dict[f"{data} Loss"] = train_loss
+                     best_loss = train_loss
                  else:
-                     postfix_dict[f"{data} Batch Loss"] = test_loss
+                     postfix_dict[f"{data} Batch Loss"] = train_loss
                  progress.set_postfix(postfix_dict)
-                 best_loss = test_loss
+                 best_loss = train_loss

                  # Update visualizations during training
                  if show_history:
                      gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [test_loss],
+                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                       best_acc_per_gen_list + [best_acc], x_train, final_activations)
@@ -448,7 +407,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                          viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                          y_train[0], viz_objects['neurons']['artists'],
                          data=data, fig1=viz_objects['neurons']['fig'],
-                         acc=best_acc, loss=test_loss)
+                         acc=best_acc, loss=train_loss)
                  )

                  if neural_web_history:
@@ -473,13 +432,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print('\nActivations: ', final_activations)
                      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                     if data == 'Test':
-                         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')
-
+
                      # Display final visualizations
                      display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                        test_loss, y_train, interval)
+                                                        train_loss, y_train, interval)
                      return best_weights, best_model[get_preds()], best_acc, final_activations

                  # Check target loss
@@ -498,13 +454,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                      print('\nActivations: ', final_activations)
                      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                     if data == 'Test':
-                         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                      # Display final visualizations
                      display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                        test_loss, y_train, interval)
+                                                        train_loss, y_train, interval)
                      return best_weights, best_model[get_preds()], best_acc, final_activations

              progress.update(1)
@@ -512,7 +465,7 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
          best_acc_per_gen_list.append(best_acc)
          loss_list.append(best_loss)

-         weight_pop, act_pop = optimizer(cp.array(weight_pop, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), target_fitness=target_fitness, bar_status=False)
+         weight_pop, act_pop = optimizer(weight_pop, act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
          target_pop = []

          # Early stopping check
@@ -532,13 +485,10 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
                  print('\nActivations: ', final_activations)
                  print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
                  print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-                 if data == 'Test':
-                     print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-                     print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

                  # Display final visualizations
                  display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    test_loss, y_train, interval)
+                                                    train_loss, y_train, interval)
                  return best_weights, best_model[get_preds()], best_acc, final_activations

      # Final evaluation
@@ -554,12 +504,9 @@ def learner(x_train, y_train, optimizer, x_test=None, y_test=None, strategy='acc
      print('\nActivations: ', final_activations)
      print(f'Train Accuracy (%{batch_size * 100} of train samples):', train_model[get_acc()])
      print(f'Train Loss (%{batch_size * 100} of train samples): ', train_loss, '\n')
-     if data == 'Test':
-         print(f'Test Accuracy (%{batch_size * 100} of test samples): ', best_acc)
-         print(f'Test Loss (%{batch_size * 100} of test samples): ', best_loss, '\n')

      # Display final visualizations
-     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval)
+     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
      return best_weights, best_model[get_preds()], best_acc, final_activations

pyerualjetwork/planeat.py CHANGED
@@ -17,10 +17,10 @@ import random
  import math

  ### LIBRARY IMPORTS ###
- from .plan import feed_forward
- from .data_operations import normalization
- from .ui import loading_bars, initialize_loading_bar
- from .activation_functions import apply_activation, all_activations
+ from plan import feed_forward
+ from data_operations import normalization
+ from ui import loading_bars, initialize_loading_bar
+ from activation_functions import apply_activation, all_activations

  def define_genomes(input_shape, output_shape, population_size, dtype=np.float32):
      """
@@ -247,8 +247,6 @@ def evolver(weights,

      else:
          raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-     if policy == 'explorer': fitness_bias = 0

      if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
          (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
@@ -328,7 +326,7 @@ def evolver(weights,
                                           cross_over_mode=cross_over_mode,
                                           activation_selection_add_prob=activation_selection_add_prob,
                                           activation_selection_change_prob=activation_selection_change_prob,
-                                          activation_selection_rate=activation_selection_rate,
+                                          threshold=activation_selection_rate,
                                           bad_genomes_selection_prob=bad_genomes_selection_prob,
                                           first_parent_fitness=best_fitness,
                                           fitness_bias=fitness_bias,
@@ -351,8 +349,7 @@ def evolver(weights,
                                         activation_change_prob=activation_mutate_change_prob,
                                         weight_mutate_prob=weight_mutate_prob,
                                         threshold=weight_mutate_rate,
-                                        genome_fitness=normalized_fitness[i],
-                                        epsilon=epsilon
+                                        genome_fitness=normalized_fitness[i]
                                         )

      elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,10 +361,9 @@ def evolver(weights,
                                        activation_change_prob=activation_mutate_change_prob,
                                        weight_mutate_prob=weight_mutate_prob,
                                        threshold=weight_mutate_rate,
-                                       genome_fitness=normalized_fitness[i],
-                                       epsilon=epsilon
+                                       genome_fitness=normalized_fitness[i]
                                        )
-
+
      if bar_status: progress.update(1)

      weights = np.vstack((bad_weights, good_weights))
@@ -397,11 +393,13 @@ def evolver(weights,
          print("  FITNESS BIAS: ", str(fitness_bias))
          print("  ACTIVATION SELECTION RATE (THRESHOLD VALUE FOR SINGLE CROSS OVER):", str(activation_selection_rate) + '\n')

+
          print("*** Performance ***")
          print("  MAX FITNESS: ", str(round(max(fitness), 2)))
          print("  MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
          print("  MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')

+         print("  BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations[-1])))
          print("  BEST GENOME INDEX: ", str(len(weights)-1))
          print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -487,7 +485,7 @@ def cross_over(first_parent_W,
                cross_over_mode,
                activation_selection_add_prob,
                activation_selection_change_prob,
-               activation_selection_rate,
+               threshold,
                bad_genomes_selection_prob,
                first_parent_fitness,
                second_parent_fitness,
@@ -516,7 +514,7 @@ def cross_over(first_parent_W,
      activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
          with one from the second parent.

-     activation_selection_rate (float): Determines how quickly activation functions are added or replaced
+     threshold (float): Determines how quickly activation functions are added or replaced
          during the crossover process.

      bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -549,7 +547,7 @@ def cross_over(first_parent_W,
               cross_over_mode='tpm',
               activation_selection_add_prob=0.8,
               activation_selection_change_prob=0.5,
-              activation_selection_rate=0.1,
+              threshold=2,
               bad_genomes_selection_prob=0.7,
               first_parent_fitness=0.9,
               second_parent_fitness=0.85,
@@ -624,8 +622,8 @@ def cross_over(first_parent_W,

      if potential_activation_selection_add > activation_selection_add_prob:

-         activation_selection_rate = activation_selection_rate / succes
-         new_threshold = activation_selection_rate
+         threshold = threshold / succes
+         new_threshold = threshold

          while True:

@@ -635,7 +633,7 @@ def cross_over(first_parent_W,
              child_act.append(random_undominant_activation)

              if len(dominant_parent_act) > new_threshold:
-                 new_threshold += activation_selection_rate
+                 new_threshold += threshold
                  pass

              else:
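The renamed threshold is easier to follow with concrete numbers. A standalone sketch of the same add-activation loop; the parent lists and succes value are illustrative stand-ins for the function's locals:

    import random

    dominant_parent_act = ['tanh', 'relu', 'elu', 'selu']   # 4 activations
    undominant_parent_act = ['sigmoid', 'softplus']
    threshold, succes = 2, 1.0

    child_act = list(dominant_parent_act)
    threshold = threshold / succes   # succes rescales the step, as in the hunk above
    new_threshold = threshold

    # With step 2 and 4 dominant activations:
    #   pass 1: append one activation; 4 > 2, so step to 4
    #   pass 2: append another;        4 > 4 is False, so stop
    # i.e. roughly len(dominant_parent_act) / threshold additions in total.
    while True:
        child_act.append(random.choice(undominant_parent_act))
        if len(dominant_parent_act) > new_threshold:
            new_threshold += threshold
        else:
            break

    print(len(child_act))  # 6: the 4 originals plus 2 appended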
@@ -646,8 +644,8 @@ def cross_over(first_parent_W,

      if potential_activation_selection_change_prob > activation_selection_change_prob:

-         activation_selection_rate = activation_selection_rate / succes
-         new_threshold = activation_selection_rate
+         threshold = threshold / succes
+         new_threshold = threshold

          while True:

@@ -658,7 +656,7 @@ def cross_over(first_parent_W,
              child_act[random_index_dominant] = random_undominant_activation

              if len(dominant_parent_act) > new_threshold:
-                 new_threshold += activation_selection_rate
+                 new_threshold += threshold
                  pass

              else:
@@ -674,8 +672,8 @@ def mutation(weight,
               activation_change_prob,
               weight_mutate_prob,
               threshold,
-              genome_fitness,
-              epsilon):
+              genome_fitness
+              ):
      """
      Performs mutation on the given weight matrix and activation functions.
      - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -699,8 +697,6 @@ def mutation(weight,
      threshold (float): If the value you enter here is equal to the result of input layer * output layer, only a single weight will be mutated during each mutation process. If the value you enter here is half of the result of input layer * output layer, two weights in the weight matrix will be mutated.

      genome_fitness (float): Fitness value of genome
-
-     epsilon (float): Small epsilon constant

      Returns:
      tuple: A tuple containing:
@@ -730,7 +726,8 @@ def mutation(weight,
      row_end = weight.shape[0]
      col_end = weight.shape[1]

-     threshold = threshold * (genome_fitness + epsilon)
+     threshold = threshold * genome_fitness
+     performance_control = 0
      new_threshold = threshold

      while True:
@@ -742,10 +739,14 @@ def mutation(weight,

          if int(row_end * col_end) > new_threshold:
              new_threshold += threshold
+             performance_control += 1
              pass

          else:
              break
+
+         if performance_control >= int(row_end * col_end):
+             break

      activation_mutate_prob = 1 - activation_mutate_prob
      potential_activation_mutation = random.uniform(0, 1)
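The new performance_control counter is the substantive part of this hunk: with epsilon gone, threshold * genome_fitness can be arbitrarily small for unfit genomes, and new_threshold would then need an enormous number of steps to exceed row_end * col_end. The counter caps the loop at one pass per weight entry. A standalone sketch with illustrative shapes:

    import random
    import numpy as np

    weight = np.random.randn(4, 8)      # illustrative 4x8 weight matrix
    row_end, col_end = weight.shape
    threshold = 0.001                   # tiny step, as when genome_fitness ~ 0
    new_threshold = threshold
    performance_control = 0             # new in 4.2.2b1: bounds the loop

    while True:
        # mutate one randomly chosen weight per pass
        weight[random.randrange(row_end), random.randrange(col_end)] = random.uniform(-1, 1)

        if int(row_end * col_end) > new_threshold:
            new_threshold += threshold
            performance_control += 1
        else:
            break

        if performance_control >= int(row_end * col_end):
            break  # at most row_end * col_end mutation passes

    # Without the guard this example would loop ~32000 times; with it, 32.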
pyerualjetwork/planeat_cuda.py CHANGED
@@ -248,8 +248,6 @@ def evolver(weights,

      else:
          raise ValueError("strategy parameter must be: 'normal_selective' or 'more_selective' or 'less_selective'")
-
-     if policy =='explorer': fitness_bias = 0

      if ((activation_mutate_add_prob < 0 or activation_mutate_add_prob > 1) or
          (activation_mutate_change_prob < 0 or activation_mutate_change_prob > 1) or
@@ -328,7 +326,7 @@ def evolver(weights,
                                           cross_over_mode=cross_over_mode,
                                           activation_selection_add_prob=activation_selection_add_prob,
                                           activation_selection_change_prob=activation_selection_change_prob,
-                                          activation_selection_rate=activation_selection_rate,
+                                          threshold=activation_selection_rate,
                                           bad_genomes_selection_prob=bad_genomes_selection_prob,
                                           first_parent_fitness=best_fitness,
                                           fitness_bias=fitness_bias,
@@ -351,8 +349,7 @@ def evolver(weights,
                                         activation_change_prob=activation_mutate_change_prob,
                                         weight_mutate_prob=weight_mutate_prob,
                                         threshold=weight_mutate_rate,
-                                        genome_fitness=normalized_fitness[i],
-                                        epsilon=epsilon
+                                        genome_fitness=normalized_fitness[i]
                                         )

      elif mutation_prob < bad_genomes_mutation_prob:
@@ -364,8 +361,7 @@ def evolver(weights,
                                        activation_change_prob=activation_mutate_change_prob,
                                        weight_mutate_prob=weight_mutate_prob,
                                        threshold=weight_mutate_rate,
-                                       genome_fitness=normalized_fitness[i],
-                                       epsilon=epsilon
+                                       genome_fitness=normalized_fitness[i]
                                        )

      if bar_status: progress.update(1)
@@ -401,6 +397,7 @@ def evolver(weights,
          print("  MEAN REWARD: ", str(cp.round(cp.mean(fitness), 2)))
          print("  MIN REWARD: ", str(cp.round(min(fitness), 2)) + '\n')

+         print("  BEST GENOME ACTIVATION LENGTH: ", str(len(activation_potentiations)))
          print("  BEST GENOME INDEX: ", str(len(weights)-1))
          print("  NOTE: Genomes are always sorted from the least successful to the most successful according to their performance ranking. Therefore, the genome at the last index is the king of the previous generation. " + '\n')
@@ -490,7 +487,7 @@ def cross_over(first_parent_W,
                cross_over_mode,
                activation_selection_add_prob,
                activation_selection_change_prob,
-               activation_selection_rate,
+               threshold,
                bad_genomes_selection_prob,
                first_parent_fitness,
                second_parent_fitness,
@@ -519,7 +516,7 @@ def cross_over(first_parent_W,
      activation_selection_change_prob (float): Probability of replacing an activation function in the child genome
          with one from the second parent.

-     activation_selection_rate (float): Determines how quickly activation functions are added or replaced
+     threshold (float): Determines how quickly activation functions are added or replaced
          during the crossover process.

      bad_genomes_selection_prob (float): Probability of selecting a "bad" genome for replacement with the offspring.
@@ -552,7 +549,7 @@ def cross_over(first_parent_W,
552
549
  cross_over_mode='tpm',
553
550
  activation_selection_add_prob=0.8,
554
551
  activation_selection_change_prob=0.5,
555
- activation_selection_rate=0.1,
552
+ threshold=2,
556
553
  bad_genomes_selection_prob=0.7,
557
554
  first_parent_fitness=0.9,
558
555
  second_parent_fitness=0.85,
@@ -628,8 +625,8 @@ def cross_over(first_parent_W,
628
625
 
629
626
  if potential_activation_selection_add > activation_selection_add_prob:
630
627
 
631
- activation_selection_rate = activation_selection_rate / succes
632
- new_threshold = activation_selection_rate
628
+ threshold = threshold / succes
629
+ new_threshold = threshold
633
630
 
634
631
  while True:
635
632
 
@@ -639,7 +636,7 @@ def cross_over(first_parent_W,
639
636
  child_act.append(random_undominant_activation)
640
637
 
641
638
  if len(dominant_parent_act) > new_threshold:
642
- new_threshold += activation_selection_rate
639
+ new_threshold += threshold
643
640
  pass
644
641
 
645
642
  else:
@@ -650,8 +647,8 @@ def cross_over(first_parent_W,
650
647
 
651
648
  if potential_activation_selection_change_prob > activation_selection_change_prob:
652
649
 
653
- activation_selection_rate = activation_selection_rate / succes
654
- new_threshold = activation_selection_rate
650
+ threshold = threshold / succes
651
+ new_threshold = threshold
655
652
 
656
653
  while True:
657
654
 
@@ -662,7 +659,7 @@ def cross_over(first_parent_W,
662
659
  child_act[random_index_dominant] = random_undominant_activation
663
660
 
664
661
  if len(dominant_parent_act) > new_threshold:
665
- new_threshold += activation_selection_rate
662
+ new_threshold += threshold
666
663
  pass
667
664
 
668
665
  else:
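Both selection loops use the renamed threshold the same way: new_threshold starts at threshold / succes (spelled as in the source) and each pass borrows one activation from the weaker parent while the dominant parent's activation list is still longer than new_threshold. A smaller threshold therefore borrows more, which is consistent with the docstring example moving from activation_selection_rate=0.1 to threshold=2. A toy reproduction of the "add" loop, assuming threshold > 0 (borrow_activations is illustrative only):

import random

def borrow_activations(dominant_act, undominant_act, threshold, succes=1.0):
    # Mirrors the crossover "add" loop: append one activation per pass,
    # then widen new_threshold by the step until it covers the list length.
    child_act = list(dominant_act)
    step = threshold / succes
    new_threshold = step
    while True:
        child_act.append(random.choice(undominant_act))
        if len(dominant_act) > new_threshold:
            new_threshold += step
        else:
            break
    return child_act

print(borrow_activations(['relu', 'tanh', 'elu'], ['selu', 'sigmoid'], threshold=2))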
@@ -679,8 +676,7 @@ def mutation(weight,
679
676
  activation_change_prob,
680
677
  weight_mutate_prob,
681
678
  threshold,
682
- genome_fitness,
683
- epsilon):
679
+ genome_fitness):
684
680
  """
685
681
  Performs mutation on the given weight matrix and activation functions.
686
682
  - The weight matrix is mutated by randomly changing its values based on the mutation probability.
@@ -705,7 +701,6 @@ def mutation(weight,
705
701
 
706
702
  genome_fitness (float): Fitness value of genome
707
703
 
708
- epsilon (float): Small epsilon constant
709
704
 
710
705
  Returns:
711
706
  tuple: A tuple containing:
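For reference, the removed epsilon was a small constant that kept the mutation step nonzero even for a zero-fitness genome. A two-line comparison (the 1e-8 value is a hypothetical placeholder, not necessarily the constant the library used):

EPSILON = 1e-8                                 # hypothetical placeholder
threshold, genome_fitness = 32, 0.0
print(threshold * (genome_fitness + EPSILON))  # 4.2.1: tiny but nonzero step
print(threshold * genome_fitness)              # 4.2.2b1: exactly zero step

With the epsilon gone, the performance_control cap shown in the next hunk is what keeps the zero-step case from looping forever.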
@@ -735,9 +730,10 @@ def mutation(weight,
735
730
  row_end = weight.shape[0]
736
731
  col_end = weight.shape[1]
737
732
 
738
- threshold = threshold * (genome_fitness + epsilon)
733
+ threshold = threshold * genome_fitness
739
734
  new_threshold = threshold
740
-
735
+ performance_control = 0
736
+
741
737
  while True:
742
738
 
743
739
  selected_row = int(random.uniform(start, row_end))
@@ -747,10 +743,14 @@ def mutation(weight,
747
743
 
748
744
  if int(row_end * col_end) > new_threshold:
749
745
  new_threshold += threshold
746
+ performance_control += 1
750
747
  pass
751
748
 
752
749
  else:
753
750
  break
751
+
752
+ if performance_control >= int(row_end * col_end):
753
+ break
754
754
 
755
755
  activation_mutate_prob = 1 - activation_mutate_prob
756
756
  potential_activation_mutation = random.uniform(0, 1)
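Read as a whole, the loop mutates roughly rows * cols / step weights per genome, so fitter genomes (larger step) receive fewer weight mutations, and a zero-fitness genome is stopped only by the cap. A quick runnable count under that reading (toy sizes, illustrative only):

rows, cols = 10, 10
for fitness in (1.0, 0.5, 0.0):
    step = 32 * fitness              # weight_mutate_rate * genome_fitness
    new_threshold, mutations, passes = step, 0, 0
    while True:
        mutations += 1               # one weight would be mutated here
        if rows * cols > new_threshold:
            new_threshold += step
            passes += 1
        else:
            break
        if passes >= rows * cols:    # the 4.2.2b1 performance_control cap
            break
    print(f"fitness={fitness}: {mutations} weight mutations")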
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: pyerualjetwork
3
- Version: 4.2.1
3
+ Version: 4.2.2b1
4
4
  Summary: PyerualJetwork is a machine learning library with GPU (CUDA) acceleration support, written in Python for professionals and researchers, including the PLAN algorithm and the PLANEAT algorithm (genetic optimization). Also includes data pre-processing and memory management
5
5
  Author: Hasan Can Beydili
6
6
  Author-email: tchasancan@gmail.com
@@ -1,24 +1,25 @@
1
- pyerualjetwork/__init__.py,sha256=VhaX5FzNtwSCXlCCWPHIdrSCTimD14fwAnfZQcDjTqQ,646
1
+ pyerualjetwork/__init__.py,sha256=S17EvWZTRS5-syTAdq8gQNAfuzqHEMStWH5I6fI5KnQ,641
2
2
  pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
3
3
  pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
4
4
  pyerualjetwork/data_operations.py,sha256=pb5CqJ0Th6fCjTNMCtqQMiwH3KezTxAijacglsKUxmY,14730
5
5
  pyerualjetwork/data_operations_cuda.py,sha256=UpoJoFhIwTU4xg9dVuLAxLAT4CkRaGsxvtJG9j1xrNo,17629
6
+ pyerualjetwork/deneme.py,sha256=6b_g1ZyJRhPtqH5D5yUGeOKO6eHMwklUQenccs4G060,7869
6
7
  pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
7
8
  pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
8
9
  pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
9
10
  pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
10
11
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
11
12
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
12
- pyerualjetwork/model_operations.py,sha256=hnhR8dtoICNJWIwGgJ65-LN3GYN_DYH4LMe6YpZVbnI,12967
13
+ pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
13
14
  pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
14
- pyerualjetwork/plan.py,sha256=hvgRaJgz_31ixI6XDy_lkucQwbAN0iIqAj4aTc0oFiw,34422
15
- pyerualjetwork/plan_cuda.py,sha256=ZTScc7VlqBLKUh6OtweJeGh-gf244nlPHTOeYrNaIyg,36065
16
- pyerualjetwork/planeat.py,sha256=hofSkdFHoGXP52GUpTOQ-QwhPF2Vf6zwEdoesfpmWxU,41006
17
- pyerualjetwork/planeat_cuda.py,sha256=0DOU1cL-JeifSAF9Q_bBpEeAJp9zxB7_oS_GyNepixo,40970
15
+ pyerualjetwork/plan.py,sha256=mACNJnHlPkeqsHDYw0BVDXp8PCOlwWsF-1KD4a2Bk7U,30431
16
+ pyerualjetwork/plan_cuda.py,sha256=aSzXyIZtq38nszW9f9JibvqoEHBCYXvVLaXMVigUUKI,32016
17
+ pyerualjetwork/planeat.py,sha256=yLPQXO754lBv48f1MXdGodP93KstryeRG3HuUaRwj6g,40753
18
+ pyerualjetwork/planeat_cuda.py,sha256=9uopmM-gTZpSb0EOExrOZPT8FF5BqDdEfCX0zYQb9QU,40712
18
19
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
19
20
  pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
20
21
  pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
21
- pyerualjetwork-4.2.1.dist-info/METADATA,sha256=1kq1oOOpZna0dDry5QSj3L8odHpHG73rf6KRbBHpE4o,7912
22
- pyerualjetwork-4.2.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
23
- pyerualjetwork-4.2.1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
24
- pyerualjetwork-4.2.1.dist-info/RECORD,,
22
+ pyerualjetwork-4.2.2b1.dist-info/METADATA,sha256=XBf-qok9_lB70pHC8EgwyPGxmCC9l-Penu0MiYc9Wvo,7914
23
+ pyerualjetwork-4.2.2b1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
24
+ pyerualjetwork-4.2.2b1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
25
+ pyerualjetwork-4.2.2b1.dist-info/RECORD,,