pyerualjetwork 4.3.9.dev2__py3-none-any.whl → 4.3.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,11 +1,33 @@
- __version__ = "4.3.9dev2"
- __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
+ __version__ = "4.3.10"
+ __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
+ * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
+ * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ * YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"""
 
- def print_version(__version__):
-     print(f"PyerualJetwork Version {__version__}" + '\n')
+ def print_version(version):
+     print(f"PyerualJetwork Version {version}\n")
 
- def print_update_notes(__update__):
-     print(f"Notes:\n{__update__}")
+ def print_update_notes(update):
+     print(f"Notes:\n{update}")
 
  print_version(__version__)
- print_update_notes(__update__)
+ print_update_notes(__update__)
+
+ required_modules = ["scipy", "tqdm", "pandas", "numpy", "colorama", "cupy", "psutil"]
+
+ missing_modules = []
+ for module in required_modules:
+     try:
+         __import__(module)
+     except ModuleNotFoundError:
+         missing_modules.append(module)
+
+ if missing_modules:
+     raise ImportError(
+         f"Missing modules detected: {', '.join(missing_modules)}\n"
+         "Please run the following command to install the missing packages:\n\n"
+         f"    pip install {' '.join(missing_modules)}\n\n"
+         "For more information, visit the PyerualJetwork GitHub README.md file:\n"
+         "https://github.com/HCB06/PyerualJetwork/blob/main/README.md"
+
+     )
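The version bump aside, the notable addition here is the import-time dependency probe. As a minimal standalone sketch of the same pattern (the module names below are hypothetical, chosen to force one hit and one miss), `__import__` imports by string name and raises `ModuleNotFoundError` when the module is absent:

```python
# Sketch of the dependency-probe pattern used above; not taken from the package.
required = ["math", "definitely_not_installed_xyz"]  # hypothetical names

missing = []
for name in required:
    try:
        __import__(name)  # import by string; raises ModuleNotFoundError if absent
    except ModuleNotFoundError:
        missing.append(name)

print(missing)  # ['definitely_not_installed_xyz']
if missing:
    print(f"pip install {' '.join(missing)}")
```

Raising `ImportError` at import time, as the package now does, surfaces a missing dependency such as `cupy` immediately instead of mid-training.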
@@ -1,75 +1,16 @@
- import numpy as np
- from scipy.spatial.distance import pdist
-
- def diversity_score(population):
+ def wals(acc, loss, acc_impact, loss_impact):
      """
-     Calculates the diversity score of a population based on the
-     Euclidean distances between individuals in the population.
+     The WALS (weighted accuracy-loss score) function calculates a weighted sum of accuracy and loss based on their respective impacts.
 
-     :param population: That calculates the diversity score of a population based on the Euclidean distances between
-     individuals in the population
-     :return: The function returns the diversity score,
-     which is a measure of how spread out or diverse the population is in terms of their characteristics.
+     :param acc: The `acc` parameter represents the accuracy of a model or system
+     :param loss: The `loss` parameter in the `wals` function represents the amount of loss incurred. It
+     is used in the calculation to determine the overall impact based on the accuracy and loss impacts
+     provided
+     :param acc_impact: The `acc_impact` parameter represents the impact of accuracy on the overall score
+     calculation in the `wals` function. It is a multiplier that determines how much the accuracy
+     contributes to the final result
+     :param loss_impact: The `loss_impact` parameter in the `wals` function represents the weight of the loss value when calculating the overall impact. It is used to determine how
+     much the loss affects the final result compared to the accuracy impact
+     :return: the weighted sum of accuracy and loss based on their respective impacts.
      """
-     if len(population) < 2:
-         return 0
-
-     distances = pdist(population, metric='euclidean')
-
-     if np.isnan(distances).any() or np.any(distances == 0):
-         distances = np.maximum(distances, 1e-10)
-     avg_distance = np.mean(distances)
-
-     if population.shape[1] == 0:
-         return 0
-
-     max_possible_distance = np.sqrt(population.shape[1])
-
-     if max_possible_distance == 0:
-         max_possible_distance = 1e-10
-
-     diversity = avg_distance / max_possible_distance
-
-     return diversity
-
-
-
- def hybrid_accuracy_confidence(y_true, y_pred, diversity_score, accuracy, alpha=2, beta=1.5, lambda_div=0.05):
-     """
-     The function calculates a fitness score based on accuracy, margin loss, and diversity score using
-     specified parameters.
-
-     @param y_true The `y_true` parameter represents the true labels of the data points. It is an array
-     or list containing the actual labels of the data points.
-     @param y_pred The `y_pred` parameter in the `hybrid_accuracy_confidence` function represents the
-     predicted values for a given dataset. It is a NumPy array containing the predicted values for each
-     sample in the dataset.
-     @param diversity_score The `diversity_score` parameter in the `hybrid_accuracy_confidence` function
-     represents a measure of diversity in the predictions. It is used as a factor in calculating the
-     fitness of the model. The function combines accuracy, margin loss, and diversity score to evaluate
-     the overall performance of the model
-     @param accuracy Accuracy is a measure of the correct predictions made by a model. It is typically
-     calculated as the number of correct predictions divided by the total number of predictions. In the
-     context of the `hybrid_accuracy_confidence` function, the accuracy parameter represents the accuracy
-     of the model's predictions.
-     @param alpha Alpha is a parameter that controls the impact of accuracy on the overall fitness score
-     in the hybrid_accuracy_confidence function. It is used as an exponent to raise the accuracy value
-     to, influencing its contribution to the fitness calculation.
-     @param beta The `beta` parameter in the `hybrid_accuracy_confidence` function is used as an exponent
-     in the fitness calculation formula. It controls the impact of the margin loss term on the overall
-     fitness value. A higher value of `beta` will amplify the effect of the margin loss term, making it
-     @param lambda_div The `lambda_div` parameter in the `hybrid_accuracy_confidence` function represents
-     the weight given to the diversity score in calculating the fitness value. It is a hyperparameter
-     that controls the impact of diversity score on the overall fitness calculation. A higher value of
-     `lambda_div` will increase the importance
-     @return The function `hybrid_accuracy_confidence` returns the fitness value calculated based on the
-     input parameters `y_true`, `y_pred`, `diversity_score`, `accuracy`, and optional parameters `alpha`,
-     `beta`, and `lambda_div`. The fitness value is computed using a formula that combines accuracy,
-     margin loss, and diversity score with specified weights and coefficients.
-     """
-     incorrect = y_true != y_pred
-     margin_loss = np.abs(y_true[incorrect] - y_pred[incorrect]).mean() if np.any(incorrect) else 0
-
-     fitness = (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
-
-     return fitness
+     return (acc * acc_impact) + (-loss * loss_impact)
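To make the new fitness concrete, here is a worked example (a sketch; the inputs are illustrative, and `acc_impact=0.9`, `loss_impact=0.1` are the defaults the new `learner` passes in):

```python
def wals(acc, loss, acc_impact, loss_impact):
    # Weighted accuracy-loss score: accuracy pushes the score up,
    # loss pulls it down, each scaled by its impact weight.
    return (acc * acc_impact) + (-loss * loss_impact)

print(wals(acc=0.95, loss=0.20, acc_impact=0.9, loss_impact=0.1))  # ~0.835
print(wals(acc=0.95, loss=0.80, acc_impact=0.9, loss_impact=0.1))  # ~0.775
```

Because the genetic optimizer maximizes this score, a genome with equal accuracy but lower loss now wins, which is what replaces the removed diversity/margin-based `hybrid_accuracy_confidence` fitness.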
@@ -0,0 +1,21 @@
+
+ import numpy as np
+
+ def categorical_crossentropy(y_true_batch, y_pred_batch):
+     epsilon = 1e-7
+     y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
+
+     losses = -np.sum(y_true_batch * np.log(y_pred_batch), axis=1)
+
+     mean_loss = np.mean(losses)
+     return mean_loss
+
+
+ def binary_crossentropy(y_true_batch, y_pred_batch):
+     epsilon = 1e-7
+     y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
+
+     losses = -np.mean(y_true_batch * np.log(y_pred_batch) + (1 - y_true_batch) * np.log(1 - y_pred_batch), axis=1)
+
+     mean_loss = np.mean(losses)
+     return mean_loss
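The `epsilon` clip in both functions is what keeps the loss finite when a predicted probability saturates to exactly 0 or 1, which softmax outputs can do in float32. A small sketch of the effect:

```python
import numpy as np

y_true = np.array([[1., 0.], [0., 1.]])
y_pred = np.array([[1., 0.], [0.3, 0.7]])  # contains an exact 1 and an exact 0

epsilon = 1e-7
y_pred = np.clip(y_pred, epsilon, 1. - epsilon)  # avoids log(0) -> -inf
loss = np.mean(-np.sum(y_true * np.log(y_pred), axis=1))
print(loss)  # ~0.1783, finite; without the clip the mean would be nan
```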
pyerualjetwork/plan.py CHANGED
@@ -6,7 +6,7 @@ MAIN MODULE FOR PLAN
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
 
  PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
- PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
  @author: Hasan Can Beydili
  @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -21,9 +21,10 @@ import numpy as np
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations import normalization, batcher
  from .activation_functions import apply_activation, all_activations
- from .metrics import metrics
- from .model_operations import get_acc, get_preds, get_preds_softmax
+ from .model_operations import get_acc, get_preds_softmax
  from .memory_operations import optimize_labels
+ from .loss_functions import categorical_crossentropy, binary_crossentropy
+ from .fitness_functions import wals
  from .visualizations import (
      draw_neural_web,
      display_visualizations_for_learner,
@@ -82,10 +83,10 @@ def fit(
      return normalization(weight, dtype=dtype)
 
 
- def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batch_size=1, pop_size=None,
+ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
              neural_web_history=False, show_current_activations=False, auto_normalization=False,
-             neurons_history=False, early_stop=False, show_history=False,
-             interval=33.33, target_acc=None,
+             neurons_history=False, early_stop=False, show_history=False, target_loss=None,
+             interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
              start_this_act=None, start_this_W=None, dtype=np.float32):
      """
      Optimizes the activation functions for a neural network by leveraging train data to find
@@ -104,37 +105,26 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
      optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
      ```python
      optimizer = lambda *args, **kwargs: planeat.evolver(*args,
-                                                         activation_add_prob=0.85,
+                                                         activation_add_prob=0.05,
                                                          strategy='aggressive',
+                                                         policy='more_selective',
                                                          **kwargs)
 
      model = plan.learner(x_train,
                           y_train,
                           optimizer,
-                          fitness,
                           fit_start=True,
-                          strategy='accuracy',
                           show_history=True,
                           gen=15,
                           batch_size=0.05,
                           interval=16.67)
      ```
 
-     fitness (function): Fitness function. We have 'hybrid_accuracy_confidence' function in the fitness_functions module. Use this: from pyerualjetwork import fitness_functions (and) fitness_function = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, 'here give your fitness function hyperparameters for example: alpha=2, beta=1.5, lambda_div=0.05', **kwargs) Example:
-     ```python
-     fitness = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)
+     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
 
-     model = plan.learner(x_train,
-                          y_train,
-                          optimizer,
-                          fitness
-                          fit_start=True,
-                          strategy='accuracy',
-                          show_history=True,
-                          gen=15,
-                          batch_size=0.05,
-                          interval=16.67)
-     ```
+     target_acc (float, optional): The target accuracy to stop training early when achieved. Default is None.
+
+     target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
 
      fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
 
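Putting the signature change together: the separate `fitness` callable is gone, and the accuracy/loss weighting is now passed as plain keyword arguments. A hedged sketch of a 4.3.10-style call, inferred from the signature above (the synthetic data and hyperparameter values are illustrative only):

```python
import numpy as np
from pyerualjetwork import plan, planeat

# Tiny synthetic dataset (4 features, 2 one-hot classes) just to exercise the API.
x_train = np.random.rand(64, 4).astype(np.float32)
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 64)]

optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                    activation_add_prob=0.05,
                                                    strategy='aggressive',
                                                    policy='more_selective',
                                                    **kwargs)

# Per the return statements in this diff, learner yields
# (best weights, softmax predictions, best accuracy, final activations).
W, preds, acc, activations = plan.learner(x_train, y_train, optimizer,
                                          fit_start=True,
                                          gen=5,
                                          batch_size=0.5,
                                          loss='categorical_crossentropy',  # monitoring only
                                          acc_impact=0.9, loss_impact=0.1)  # fed into wals()
```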
@@ -171,7 +161,6 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
 
      """
 
-     from .fitness_functions import diversity_score
      from .planeat import define_genomes
 
      data = 'Train'
@@ -189,8 +178,7 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
 
      if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
 
-     if gen is None:
-         gen = activation_potentiation_len
+     if gen is None: gen = activation_potentiation_len
 
      if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
      if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
@@ -200,13 +188,14 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
 
      # Initialize variables
      best_acc = 0
-     best_fit = 0
+     best_loss = float('inf')
+     best_fitness = float('-inf')
      best_acc_per_gen_list = []
      postfix_dict = {}
-     fit_list = []
+     loss_list = []
      target_pop = []
 
-     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)
+     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=77, bar_format=bar_format_learner)
 
      if fit_start is False or pop_size > activation_potentiation_len:
          weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
@@ -219,6 +208,7 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
          weight_pop[0] = start_this_W
          act_pop[0] = start_this_act
 
+     # LEARNING STARTED
      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
          progress.set_postfix(postfix_dict)
@@ -242,51 +232,52 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]
 
-             div_score = diversity_score(W)
-             fit_score = fitness(y_train_batch, model[get_preds_softmax()], div_score, acc)
+             if loss == 'categorical_crossentropy':
+                 train_loss = categorical_crossentropy(y_true_batch=y_train_batch,
+                                                       y_pred_batch=model[get_preds_softmax()])
+             else:
+                 train_loss = binary_crossentropy(y_true_batch=y_train_batch,
+                                                  y_pred_batch=model[get_preds_softmax()])
 
-             target_pop.append(fit_score)
+             fitness = wals(acc, train_loss, acc_impact, loss_impact)
+             target_pop.append(fitness)
 
-             if fit_score >= best_fit:
+             if fitness >= best_fitness:
 
+                 best_fitness = fitness
                  best_acc = acc
-                 best_fit = fit_score
-                 best_weights = np.copy(weight_pop[j])
-                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-
+                 best_loss = train_loss
+                 best_weight = np.copy(weight_pop[j])
                  best_model = model
 
+                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
                  final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
                  if batch_size == 1:
                      postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
+                     postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
                      progress.set_postfix(postfix_dict)
 
                  if show_current_activations:
                      print(f", Current Activations={final_activations}", end='')
 
-                 if batch_size == 1:
-                     postfix_dict[f"{data} Fitness"] = np.round(fit_score, 4)
-                     progress.set_postfix(postfix_dict)
-                     best_fit = fit_score
-
                  # Update visualizations during training
                  if show_history:
                      gen_list = range(1, len(best_acc_per_gen_list) + 2)
-                     update_history_plots_for_learner(viz_objects, gen_list, fit_list + [fit_score],
+                     update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
                                                       best_acc_per_gen_list + [best_acc], x_train, final_activations)
 
                  if neurons_history:
                      viz_objects['neurons']['artists'] = (
-                         update_neuron_history_for_learner(np.copy(best_weights), viz_objects['neurons']['ax'],
+                         update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
                                                            viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                                            y_train[0], viz_objects['neurons']['artists'],
                                                            data=data, fig1=viz_objects['neurons']['fig'],
-                                                           acc=best_acc, loss=fit_score)
+                                                           acc=best_acc, loss=train_loss)
                      )
 
                  if neural_web_history:
-                     art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
+                     art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
                                                               G=viz_objects['web']['G'], return_objs=True)
                      art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                      viz_objects['web']['artists'].append(art5_list)
@@ -294,46 +285,68 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
              # Check target accuracy
              if target_acc is not None and best_acc >= target_acc:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights,
+                 train_model = evaluate(x_train, y_train, W=best_weight,
                                         activation_potentiation=final_activations)
-
-                 div_score = diversity_score(W)
-                 fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
 
                  print('\nActivations: ', final_activations)
                  print(f'Train Accuracy:', train_model[get_acc()])
-                 print(f'Fitness Value: ', fit_score, '\n')
-
-                 postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
-                 postfix_dict[f"{data} Fitness"] = np.round(best_fit, 4)
-                 progress.set_postfix(postfix_dict)
+                 print(f'Train Loss: ', train_loss, '\n')
+
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                    best_loss, y_train, interval)
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+
+             # Check target loss
+             if target_loss is not None and best_loss <= target_loss:
+                 progress.close()
+                 train_model = evaluate(x_train, y_train, W=best_weight,
+                                        activation_potentiation=final_activations)
+
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
+
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy :', train_model[get_acc()])
+                 print(f'Train Loss : ', train_loss, '\n')
+
                  # Display final visualizations
-                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    fit_score, y_train, interval)
-                 return best_weights, best_model[get_preds()], best_acc, final_activations
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                    train_loss, y_train, interval)
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
+
              progress.update(1)
 
          if batch_size != 1:
-             train_model = evaluate(x_train, y_train, best_weights, final_activations)
+             train_model = evaluate(x_train, y_train, best_weight, final_activations)
+
+             if loss == 'categorical_crossentropy':
+                 train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                       y_pred_batch=train_model[get_preds_softmax()])
+             else:
+                 train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                  y_pred_batch=train_model[get_preds_softmax()])
 
-             div_score = diversity_score(W)
-             fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
-
-             print('\nActivations: ', final_activations)
-             print(f'Train Accuracy:', train_model[get_acc()])
-             print(f'Fitness Value: ', fit_score, '\n')
-
              postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
-             postfix_dict[f"{data} Fitness"] = np.round(best_fit, 4)
+             postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
             progress.set_postfix(postfix_dict)
 
             best_acc_per_gen_list.append(train_model[get_acc()])
-             fit_list.append(fit_score)
+             loss_list.append(train_loss)
 
         else:
             best_acc_per_gen_list.append(best_acc)
-             fit_list.append(best_fit)
+             loss_list.append(best_loss)
 
         weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
         target_pop = []
@@ -342,36 +355,44 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights,
+                 train_model = evaluate(x_train, y_train, W=best_weight,
                                         activation_potentiation=final_activations)
+
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
 
-                 div_score = diversity_score(W)
-                 fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
-
-                 print('\nActivations: ', final_activations)
-                 print(f'Train Accuracy:', train_model[get_acc()])
-                 print(f'Fitness Value: ', fit_score, '\n')
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy:', train_model[get_acc()])
+                 print(f'Train Loss: ', train_loss, '\n')
 
-                 # Display final visualizations
-                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    fit_score, y_train, interval)
-                 return best_weights, best_model[get_preds()], best_acc, final_activations
+                 # Display final visualizations
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                    train_loss, y_train, interval)
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
     # Final evaluation
     progress.close()
-     train_model = evaluate(x_train, y_train, W=best_weights,
+     train_model = evaluate(x_train, y_train, W=best_weight,
                             activation_potentiation=final_activations)
 
-     div_score = diversity_score(W)
-     fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
-
+     if loss == 'categorical_crossentropy':
+         train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                               y_pred_batch=train_model[get_preds_softmax()])
+     else:
+         train_loss = binary_crossentropy(y_true_batch=y_train,
+                                          y_pred_batch=train_model[get_preds_softmax()])
+
     print('\nActivations: ', final_activations)
     print(f'Train Accuracy:', train_model[get_acc()])
-     print(f'Fitness Value: ', fit_score, '\n')
+     print(f'Train Loss: ', train_loss, '\n')
 
     # Display final visualizations
-     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, fit_score, y_train, interval)
-     return best_weights, best_model[get_preds()], best_acc, final_activations
+     display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, train_loss, y_train, interval)
+     return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
 
 def evaluate(
@@ -397,9 +418,12 @@ def evaluate(
     """
 
     x_test = apply_activation(x_test, activation_potentiation)
+
     result = x_test @ W.T
-     epsilon = 1e-10
-     accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
-     softmax_preds = np.exp(result) / (np.sum(np.exp(result), axis=1, keepdims=True) + epsilon)
-
-     return W, None, accuracy, None, None, softmax_preds
+
+     max_vals = np.max(result, axis=1, keepdims=True)
+
+     softmax_preds = np.exp(result - max_vals) / np.sum(np.exp(result - max_vals), axis=1, keepdims=True)
+     accuracy = (np.argmax(softmax_preds, axis=1) == np.argmax(y_test, axis=1)).mean()
+
+     return W, result, accuracy, None, None, softmax_preds
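The rewritten `evaluate` also switches from an epsilon-padded softmax to the standard max-subtraction form and returns the raw `result` logits instead of `None` in the second slot. A standalone NumPy sketch (not from the package) of why the shift matters once logits grow large:

```python
import numpy as np

logits = np.array([[1000., 999., 998.]])

# Naive softmax overflows: np.exp(1000) is inf, so every entry becomes nan.
with np.errstate(over='ignore', invalid='ignore'):
    naive = np.exp(logits) / np.sum(np.exp(logits), axis=1, keepdims=True)
print(naive)   # [[nan nan nan]]

# Subtracting the row max keeps every exponent <= 0, so exp() stays bounded.
shifted = logits - np.max(logits, axis=1, keepdims=True)
stable = np.exp(shifted) / np.sum(np.exp(shifted), axis=1, keepdims=True)
print(stable)  # [[0.6652 0.2447 0.0900]]
```

Since max-subtraction is a monotone shift, `argmax` over `softmax_preds` matches `argmax` over the raw logits, so the reported accuracy is unchanged.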
@@ -6,7 +6,7 @@ MAIN MODULE FOR PLAN_CUDA
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
 
  PLAN document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PLAN/PLAN.pdf
- PYERUALJETWORK document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
  @author: Hasan Can Beydili
  @YouTube: https://www.youtube.com/@HasanCanBeydili
@@ -20,11 +20,11 @@ import cupy as cp
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations_cuda import normalization
- from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
  from .activation_functions_cuda import apply_activation, all_activations
- from .metrics_cuda import metrics
- from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
+ from .model_operations_cuda import get_acc, get_preds_softmax
  from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
+ from .loss_functions_cuda import categorical_crossentropy, binary_crossentropy
+ from .fitness_functions import wals
  from .visualizations_cuda import (
      draw_neural_web,
      display_visualizations_for_learner,
@@ -82,10 +82,10 @@ def fit(
      return normalization(weight, dtype=dtype)
 
 
- def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
-             neural_web_history=False, show_current_activations=False, auto_normalization=False,
-             neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
-             interval=33.33, target_acc=None, target_loss=None,
+ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1, pop_size=None,
+             neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
+             neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
+             interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
              start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
      """
      Optimizes the activation functions for a neural network by leveraging train data to find
@@ -103,29 +103,37 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
 
      optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat_cuda (and) optimizer = lambda *args, **kwargs: planeat_cuda.evolve(*args, 'here give your neat hyperparameters for example: activation_add_prob=0.85', **kwargs) Example:
      ```python
-     genetic_optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
-                                                                      activation_add_prob=0.85,
+     optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
+                                                              activation_add_prob=0.05,
                                                               strategy='aggressive',
+                                                              policy='more_selective',
                                                               **kwargs)
 
      model = plan_cuda.learner(x_train,
                                y_train,
-                               optimizer=genetic_optimizer,
+                               optimizer,
                                fit_start=True,
-                               strategy='accuracy',
                                show_history=True,
                                gen=15,
                                batch_size=0.05,
                                interval=16.67)
      ```
+     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
+
+     target_acc (float, optional): The target accuracy to stop training early when achieved. Default is None.
+
+     target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
+
      fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
 
-     strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
-
      gen (int, optional): The generation count for genetic optimization.
 
      batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
 
+     loss_impact (float, optional): Impact of loss during evolve. [0-1] Default: 0.1
+
+     acc_impact (float, optional): Impact of accuracy during evolve. [0-1] Default: 0.9
+
      pop_size (int, optional): Population size of each generation. Default: count of activation functions
 
      early_stop (bool, optional): If True, implements early stopping during training.(If train accuracy not improves in two gen stops learning.) Default is False.
@@ -134,15 +142,11 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
 
      show_history (bool, optional): If True, displays the training history after optimization. Default is False.
 
-     loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
-
      auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False
 
      interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.
 
      target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
-
-     target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
 
      start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
 
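The CUDA learner now mirrors the CPU call; what remains different is the CuPy dtype and the `memory` switch. A hedged sketch inferred from the new signature (illustrative values; assumes a CUDA-capable GPU with CuPy installed):

```python
import numpy as np
import cupy as cp
from pyerualjetwork import plan_cuda, planeat_cuda

# Same tiny synthetic dataset as in the CPU sketch above.
x_train = np.random.rand(64, 4).astype(np.float32)
y_train = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, 64)]

optimizer = lambda *args, **kwargs: planeat_cuda.evolver(*args,
                                                         policy='more_selective',
                                                         **kwargs)

W, preds, acc, activations = plan_cuda.learner(x_train, y_train, optimizer,
                                               gen=5, batch_size=0.5,
                                               acc_impact=0.9, loss_impact=0.1,
                                               dtype=cp.float32,
                                               memory='gpu')  # or 'cpu'
```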
@@ -174,6 +178,8 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
 
      if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
 
+     if gen is None: gen = activation_potentiation_len
+
      if memory == 'gpu':
          x_train = transfer_to_gpu(x_train, dtype=dtype)
          y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)
@@ -189,7 +195,6 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
      else:
          raise ValueError("memory parameter must be 'cpu' or 'gpu'.")
 
-     if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
      if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
      if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
 
@@ -198,19 +203,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
 
      # Initialize variables
      best_acc = 0
-     best_f1 = 0
-     best_recall = 0
-     best_precision = 0
+     best_loss = float('inf')
+     best_fitness = float('-inf')
      best_acc_per_gen_list = []
      postfix_dict = {}
      loss_list = []
      target_pop = []
 
-     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)
+     progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=79, bar_format=bar_format_learner)
 
      if fit_start is False or pop_size > activation_potentiation_len:
          weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
-
+
      else:
          weight_pop = [0] * pop_size
          act_pop = [0] * pop_size
@@ -219,6 +223,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
          weight_pop[0] = start_this_W
          act_pop[0] = start_this_act
 
+     # LEARNING STARTED
      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
          progress.set_postfix(postfix_dict)
@@ -232,7 +237,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
 
              x_train_batch = cp.array(x_train_batch, dtype=dtype, copy=False)
-             y_train_batch = cp.array(y_train_batch, dtype=dtype, copy=False)
+             y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)
 
              if fit_start is True and i == 0 and j < activation_potentiation_len:
                  if start_this_act is not None and j == 0:
@@ -241,62 +246,39 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
                  act_pop[j] = activation_potentiation[j]
                  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
                  weight_pop[j] = W
-
+
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
              acc = model[get_acc()]
-
-             if strategy == 'accuracy': target_pop.append(acc)
-
-             elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
-                 precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
-
-                 if strategy == 'precision':
-                     target_pop.append(precision_score)
-
-                     if i == 0 and j == 0:
-                         best_precision = precision_score
-
-                 if strategy == 'recall':
-                     target_pop.append(recall_score)
-
-                     if i == 0 and j == 0:
-                         best_recall = recall_score
+
+             if loss == 'categorical_crossentropy':
+                 train_loss = categorical_crossentropy(y_true_batch=y_train_batch,
+                                                       y_pred_batch=model[get_preds_softmax()])
+             else:
+                 train_loss = binary_crossentropy(y_true_batch=y_train_batch,
+                                                  y_pred_batch=model[get_preds_softmax()])
 
-                 if strategy == 'f1':
-                     target_pop.append(f1_score)
 
-                     if i == 0 and j == 0:
-                         best_f1 = f1_score
+             fitness = wals(acc, train_loss, acc_impact, loss_impact)
+             target_pop.append(fitness)
 
-             if ((strategy == 'accuracy' and acc >= best_acc) or
-                 (strategy == 'f1' and f1_score >= best_f1) or
-                 (strategy == 'precision' and precision_score >= best_precision) or
-                 (strategy == 'recall' and recall_score >= best_recall)):
+             if fitness >= best_fitness:
 
+                 best_fitness = fitness
                  best_acc = acc
-                 best_weights = cp.copy(weight_pop[j])
-                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-
+                 best_loss = train_loss
+                 best_weight = cp.copy(weight_pop[j])
                  best_model = model
 
+                 final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
                  final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
 
                  if batch_size == 1:
                      postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 4)
+                     postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
                      progress.set_postfix(postfix_dict)
 
                  if show_current_activations:
                      print(f", Current Activations={final_activations}", end='')
-
-                 if loss == 'categorical_crossentropy':
-                     train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-                 else:
-                     train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
-
-                 if batch_size == 1:
-                     postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
-                     best_loss = train_loss
-                     progress.set_postfix(postfix_dict)
 
                  # Update visualizations during training
                  if show_history:
@@ -306,7 +288,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
 
                  if neurons_history:
                      viz_objects['neurons']['artists'] = (
-                         update_neuron_history_for_learner(cp.copy(best_weights), viz_objects['neurons']['ax'],
+                         update_neuron_history_for_learner(cp.copy(best_weight), viz_objects['neurons']['ax'],
                                                            viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                                                            y_train[0], viz_objects['neurons']['artists'],
                                                            data=data, fig1=viz_objects['neurons']['fig'],
@@ -314,7 +296,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
                      )
 
                  if neural_web_history:
-                     art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
+                     art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
                                                               G=viz_objects['web']['G'], return_objs=True)
                      art5_list = [art5_1] + [art5_2] + list(art5_3.values())
                      viz_objects['web']['artists'].append(art5_list)
@@ -322,9 +304,8 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
              # Check target accuracy
              if target_acc is not None and best_acc >= target_acc:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights,
+                 train_model = evaluate(x_train, y_train, W=best_weight,
                                         activation_potentiation=final_activations)
-
                  if loss == 'categorical_crossentropy':
                      train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                            y_pred_batch=train_model[get_preds_softmax()])
@@ -335,16 +316,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
                  print('\nActivations: ', final_activations)
                  print(f'Train Accuracy:', train_model[get_acc()])
                  print(f'Train Loss: ', train_loss, '\n')
-
-                 # Display final visualizations
-                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    train_loss, y_train, interval)
-                 return best_weights, best_model[get_preds()], best_acc, final_activations
+
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                    best_loss, y_train, interval)
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
              # Check target loss
              if target_loss is not None and best_loss <= target_loss:
                  progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights,
+                 train_model = evaluate(x_train, y_train, W=best_weight,
                                         activation_potentiation=final_activations)
 
                  if loss == 'categorical_crossentropy':
@@ -355,28 +335,31 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
                                                       y_pred_batch=train_model[get_preds_softmax()])
 
                  print('\nActivations: ', final_activations)
-                 print(f'Train Accuracy: ', train_model[get_acc()])
-                 print(f'Train Loss: ', train_loss, '\n')
+                 print(f'Train Accuracy :', train_model[get_acc()])
+                 print(f'Train Loss : ', train_loss, '\n')
 
                  # Display final visualizations
-                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
                                                     train_loss, y_train, interval)
-                 return best_weights, best_model[get_preds()], best_acc, final_activations
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
+
 
              progress.update(1)
 
          if batch_size != 1:
-             train_model = evaluate(x_train, y_train, best_weights, final_activations)
-
+             train_model = evaluate(x_train, y_train, best_weight, final_activations)
+
             if loss == 'categorical_crossentropy':
-                 train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train, dtype=y_train.dtype), y_pred_batch=train_model[get_preds_softmax()])
+                 train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                       y_pred_batch=train_model[get_preds_softmax()])
             else:
-                 train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train, dtype=y_train.dtype), y_pred_batch=train_model[get_preds_softmax()])
-
+                 train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                  y_pred_batch=train_model[get_preds_softmax()])
+
             postfix_dict[f"{data} Accuracy"] = cp.round(train_model[get_acc()], 4)
             postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
             progress.set_postfix(postfix_dict)
-
+
             best_acc_per_gen_list.append(train_model[get_acc()])
             loss_list.append(train_loss)
 
@@ -391,42 +374,44 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
         if early_stop == True and i > 0:
             if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
                 progress.close()
-                 train_model = evaluate(x_train, y_train, W=best_weights,
+                 train_model = evaluate(x_train, y_train, W=best_weight,
                                         activation_potentiation=final_activations)
+
+                 if loss == 'categorical_crossentropy':
+                     train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                                           y_pred_batch=train_model[get_preds_softmax()])
+                 else:
+                     train_loss = binary_crossentropy(y_true_batch=y_train,
+                                                      y_pred_batch=train_model[get_preds_softmax()])
 
-                 if loss == 'categorical_crossentropy':
-                     train_loss = categorical_crossentropy(y_true_batch=y_train,
-                                                           y_pred_batch=train_model[get_preds_softmax()])
-                 else:
-                     train_loss = binary_crossentropy(y_true_batch=y_train,
-                                                      y_pred_batch=train_model[get_preds_softmax()])
-
-                 print('\nActivations: ', final_activations)
-                 print(f'Train Accuracy:', train_model[get_acc()])
-                 print(f'Train Loss: ', train_loss, '\n')
+                 print('\nActivations: ', final_activations)
+                 print(f'Train Accuracy:', train_model[get_acc()])
+                 print(f'Train Loss: ', train_loss, '\n')
 
-                 # Display final visualizations
-                 display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
-                                                    train_loss, y_train, interval)
-                 return best_weights, best_model[get_preds()], best_acc, final_activations
+                 # Display final visualizations
+                 display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+                                                    train_loss, y_train, interval)
+                 return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
     # Final evaluation
     progress.close()
-     train_model = evaluate(x_train, y_train, W=best_weights,
+     train_model = evaluate(x_train, y_train, W=best_weight,
                             activation_potentiation=final_activations)
 
     if loss == 'categorical_crossentropy':
-         train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+         train_loss = categorical_crossentropy(y_true_batch=y_train,
+                                               y_pred_batch=train_model[get_preds_softmax()])
     else:
-         train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
-
+         train_loss = binary_crossentropy(y_true_batch=y_train,
+                                          y_pred_batch=train_model[get_preds_softmax()])
+
     print('\nActivations: ', final_activations)
-     print(f'Train Accuracy: ', train_model[get_acc()])
-     print(f'Train Loss : ', train_loss, '\n')
+     print(f'Train Accuracy:', train_model[get_acc()])
+     print(f'Train Loss: ', train_loss, '\n')
 
     # Display final visualizations
-     display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
-     return best_weights, best_model[get_preds()], best_acc, final_activations
+     display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, train_loss, y_train, interval)
+     return best_weight, best_model[get_preds_softmax()], best_acc, final_activations
 
 def evaluate(
     x_test,
@@ -439,9 +424,9 @@ def evaluate(
 
     Args:
         x_test (cp.ndarray): Test data.
-
+
         y_test (cp.ndarray): Test labels (one-hot encoded).
-
+
         W (cp.ndarray): Neural net weight matrix.
 
         activation_potentiation (list): Activation list. Default = ['linear'].
@@ -451,9 +436,12 @@ def evaluate(
     """
 
     x_test = apply_activation(x_test, activation_potentiation)
+
     result = x_test @ W.T
-     epsilon = 1e-10
-     accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
-     softmax_preds = cp.exp(result) / (cp.sum(cp.exp(result), axis=1, keepdims=True) + epsilon)
 
-     return W, None, accuracy, None, None, softmax_preds
+     max_vals = cp.max(result, axis=1, keepdims=True)
+
+     softmax_preds = cp.exp(result - max_vals) / cp.sum(cp.exp(result - max_vals), axis=1, keepdims=True)
+     accuracy = (cp.argmax(softmax_preds, axis=1) == cp.argmax(y_test, axis=1)).mean()
+
+     return W, result, accuracy, None, None, softmax_preds
pyerualjetwork/planeat.py CHANGED
@@ -3,12 +3,12 @@ MAIN MODULE FOR PLANEAT
 
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
 
- ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
  @author: Hasan Can Beydili
  @YouTube: https://www.youtube.com/@HasanCanBeydili
  @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- @Instagram: https://www.instagram.com/canbeydili.06/
+ @Instagram: https://www.instagram.com/canbeydilj
  @contact: tchasancan@gmail.com
  """
 
@@ -560,11 +560,11 @@ def cross_over(first_parent_W,
          selection_bias = random.uniform(0, 1)
 
          if fitness_bias > selection_bias:
-             row_cut_start = math.floor(row_cut_start * (succes + epsilon))
-             row_cut_end = math.ceil(row_cut_end * (succes + epsilon))
+             row_cut_start = math.floor(row_cut_start * succes)
+             row_cut_end = math.ceil(row_cut_end * succes)
 
-             col_cut_start = math.floor(col_cut_start * (succes + epsilon))
-             col_cut_end = math.ceil(col_cut_end * (succes + epsilon))
+             col_cut_start = math.floor(col_cut_start * succes)
+             col_cut_end = math.ceil(col_cut_end * succes)
 
      child_W = dominant_parent_W
 
@@ -590,9 +590,10 @@ def cross_over(first_parent_W,
              random_undominant_activation = undominant_parent_act[random_index]
 
              child_act.append(random_undominant_activation)
-
+             new_threshold += threshold
+
              if len(dominant_parent_act) > new_threshold:
-                 new_threshold += threshold
+                 pass
 
              else:
                  break
@@ -612,9 +613,10 @@ def cross_over(first_parent_W,
              random_undominant_activation = undominant_parent_act[random_index_undominant]
 
              child_act[random_index_dominant] = random_undominant_activation
-
+             new_threshold += threshold
+
              if len(dominant_parent_act) > new_threshold:
-                 new_threshold += threshold
+                 pass
 
             else:
                 break
@@ -690,7 +692,7 @@ def mutation(weight,
 
     max_threshold = row_end * col_end
 
-     threshold = weight_mutate_threshold * genome_fitness
+     threshold = weight_mutate_threshold * (genome_fitness + epsilon)
     new_threshold = threshold
 
     for _ in range(max_threshold):
@@ -3,12 +3,12 @@ MAIN MODULE FOR PLANEAT
 
  Examples: https://github.com/HCB06/PyerualJetwork/tree/main/Welcome_to_PyerualJetwork/ExampleCodes
 
- ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
+ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/ANAPLAN_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
 
  @author: Hasan Can Beydili
  @YouTube: https://www.youtube.com/@HasanCanBeydili
  @Linkedin: https://www.linkedin.com/in/hasan-can-beydili-77a1b9270/
- @Instagram: https://www.instagram.com/canbeydili.06/
+ @Instagram: https://www.instagram.com/canbeydilj
  @contact: tchasancan@gmail.com
  """
 
@@ -694,7 +694,7 @@ def mutation(weight,
 
     max_threshold = row_end * col_end
 
-     threshold = weight_mutate_threshold * genome_fitness
+     threshold = weight_mutate_threshold * (genome_fitness + epsilon)
     new_threshold = threshold
 
     for _ in range(max_threshold):
@@ -771,7 +771,7 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
         translated_x_train += draw_activations(x, activation)
 
     art3 = hist['ax'][2].plot(x, translated_x_train, color='b', markersize=6, linewidth=2)
-     hist['ax'][2].set_title('Potentiation Shape Over Gen')
+     hist['ax'][2].set_title('Activation Shape Over Gen')
     hist['artist3'].append(art3)
 
 def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval):
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pyerualjetwork
- Version: 4.3.9.dev2
+ Version: 4.3.10
 Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
 Author: Hasan Can Beydili
 Author-email: tchasancan@gmail.com
@@ -43,6 +43,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
         'tqdm==4.66.4',
         'pandas==2.2.2',
         'networkx==3.3',
+         'seaborn==0.13.2',
         'numpy==1.26.4',
         'matplotlib==3.9.0',
         'colorama==0.4.6',
@@ -50,7 +51,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
         'psutil==6.1.1'
     ]
 
-     matplotlib, networkx (optional).
+     matplotlib, networkx, seaborn (optional).
 
 ##############################
 
@@ -1,24 +1,25 @@
- pyerualjetwork/__init__.py,sha256=9eOTCzpFVUX9DY_9CL8Cw_Z88H6-tD0Jnw8a-gKKiWg,643
+ pyerualjetwork/__init__.py,sha256=hic_KmaifwtlpUkR05uf02m0h2bZVKUYUvTa4cWuP2U,1280
  pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
  pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
  pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
  pyerualjetwork/data_operations_cuda.py,sha256=ZcjmLXE1-HVwedextYdJZ1rgrns1OfSekzFpr1a9m6o,17625
- pyerualjetwork/fitness_functions.py,sha256=0tj1Ri8x8PXR9cuZF_PaUF3mjgsys0IRAEKklipKxZA,4064
+ pyerualjetwork/fitness_functions.py,sha256=9ZtCasBVjk3qQldi2pIzV6c5hTBJJyGCU0eVhJsoxPo,1145
  pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
+ pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
  pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
  pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
- pyerualjetwork/plan.py,sha256=vps6EHyHoFWBlp3g5BF3ygD4vIkFvvRsmB7xS0IJ70E,20452
- pyerualjetwork/plan_cuda.py,sha256=DCKWStOZhBq-NYKA2BQTKe3YgvDYuGtujtIr56ZBjiw,24477
- pyerualjetwork/planeat.py,sha256=OxwSjfSFPwh7kVrhnuAkr8Lrk73GB-Wk2ajUFIfZcbQ,37556
- pyerualjetwork/planeat_cuda.py,sha256=aTdBmhJeIKC58pssODtqVsdOtJP7W6TRmVyGHp7k_CM,37612
+ pyerualjetwork/plan.py,sha256=M8KVSXUjmSkf4bK7FnizLZjWQfI_3suHIJ-XminM2SM,22291
+ pyerualjetwork/plan_cuda.py,sha256=hRPPw4YsiVpfoXkNN1le1zaVHJuSySvihiFBt2eHT7c,23388
+ pyerualjetwork/planeat.py,sha256=FBoYirDE8R0zGtB-EFsHdQ50RijC7rnZHqDIiXx67rs,37571
+ pyerualjetwork/planeat_cuda.py,sha256=5suVlOfHcThhGbPlHbxAqbp5-rpbw4E2H6trvDvrmg4,37627
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
- pyerualjetwork/visualizations.py,sha256=08O5uEewuYiovZRX1uHWEHjn19LcnhndWYvqVN74xs0,28290
+ pyerualjetwork/visualizations.py,sha256=t1BqnFUH5jKiPdFMI2kWjFg6-amrBV0wvW05aD77NQs,28288
  pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
- pyerualjetwork-4.3.9.dev2.dist-info/METADATA,sha256=Z0nThYRtlJKsScG6ZRZYdPcBPU_GfIcxtE8oz1nFVRI,7479
- pyerualjetwork-4.3.9.dev2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.9.dev2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.3.9.dev2.dist-info/RECORD,,
+ pyerualjetwork-4.3.10.dist-info/METADATA,sha256=rmH-W0wUOGbJUEZEp_CrCukqz3VT2pSbxl0xxCKJcO8,7506
+ pyerualjetwork-4.3.10.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.10.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.3.10.dist-info/RECORD,,