pyerualjetwork-4.3.9.dev2-py3-none-any.whl → pyerualjetwork-4.3.10b0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.9dev2"
+ __version__ = "4.3.10b0"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork/fitness_functions.py CHANGED
@@ -13,56 +13,51 @@ def diversity_score(population):
  """
  if len(population) < 2:
  return 0
-
+
+ population = np.nan_to_num(population, nan=0.0)
+
  distances = pdist(population, metric='euclidean')
-
- if np.isnan(distances).any() or np.any(distances == 0):
- distances = np.maximum(distances, 1e-10)
+ distances = np.maximum(distances, 1e-10)
+
  avg_distance = np.mean(distances)
-
  if population.shape[1] == 0:
  return 0
-
+
  max_possible_distance = np.sqrt(population.shape[1])
-
- if max_possible_distance == 0:
- max_possible_distance = 1e-10
-
- diversity = avg_distance / max_possible_distance
+ max_possible_distance = max(max_possible_distance, 1e-10)

+ diversity = avg_distance / max_possible_distance
  return diversity

-
-
- def hybrid_accuracy_confidence(y_true, y_pred, diversity_score, accuracy, alpha=2, beta=1.5, lambda_div=0.05):
+ def multi_head_fitness(y_true, y_pred, diversity_score, accuracy, alpha=2, beta=1.5, lambda_div=0.05):
  """
  The function calculates a fitness score based on accuracy, margin loss, and diversity score using
  specified parameters.

  @param y_true The `y_true` parameter represents the true labels of the data points. It is an array
  or list containing the actual labels of the data points.
- @param y_pred The `y_pred` parameter in the `hybrid_accuracy_confidence` function represents the
+ @param y_pred The `y_pred` parameter in the `multi_head_fitness` function represents the
  predicted values for a given dataset. It is a NumPy array containing the predicted values for each
  sample in the dataset.
- @param diversity_score The `diversity_score` parameter in the `hybrid_accuracy_confidence` function
+ @param diversity_score The `diversity_score` parameter in the `multi_head_fitness` function
  represents a measure of diversity in the predictions. It is used as a factor in calculating the
  fitness of the model. The function combines accuracy, margin loss, and diversity score to evaluate
  the overall performance of the model
  @param accuracy Accuracy is a measure of the correct predictions made by a model. It is typically
  calculated as the number of correct predictions divided by the total number of predictions. In the
- context of the `hybrid_accuracy_confidence` function, the accuracy parameter represents the accuracy
+ context of the `multi_head_fitness` function, the accuracy parameter represents the accuracy
  of the model's predictions.
  @param alpha Alpha is a parameter that controls the impact of accuracy on the overall fitness score
- in the hybrid_accuracy_confidence function. It is used as an exponent to raise the accuracy value
+ in the multi_head_fitness function. It is used as an exponent to raise the accuracy value
  to, influencing its contribution to the fitness calculation.
- @param beta The `beta` parameter in the `hybrid_accuracy_confidence` function is used as an exponent
+ @param beta The `beta` parameter in the `multi_head_fitness` function is used as an exponent
  in the fitness calculation formula. It controls the impact of the margin loss term on the overall
  fitness value. A higher value of `beta` will amplify the effect of the margin loss term, making it
- @param lambda_div The `lambda_div` parameter in the `hybrid_accuracy_confidence` function represents
+ @param lambda_div The `lambda_div` parameter in the `multi_head_fitness` function represents
  the weight given to the diversity score in calculating the fitness value. It is a hyperparameter
  that controls the impact of diversity score on the overall fitness calculation. A higher value of
  `lambda_div` will increase the importance
- @return The function `hybrid_accuracy_confidence` returns the fitness value calculated based on the
+ @return The function `multi_head_fitness` returns the fitness value calculated based on the
  input parameters `y_true`, `y_pred`, `diversity_score`, `accuracy`, and optional parameters `alpha`,
  `beta`, and `lambda_div`. The fitness value is computed using a formula that combines accuracy,
  margin loss, and diversity score with specified weights and coefficients.
@@ -70,6 +65,8 @@ def hybrid_accuracy_confidence(y_true, y_pred, diversity_score, accuracy, alpha=
  incorrect = y_true != y_pred
  margin_loss = np.abs(y_true[incorrect] - y_pred[incorrect]).mean() if np.any(incorrect) else 0

- fitness = (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
+ margin_loss = np.nan_to_num(margin_loss, nan=0.0)
+ accuracy = max(accuracy, 1e-10)

+ fitness = (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
  return fitness
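Read together, the two hunks above replace the conditional NaN/zero guards with unconditional clamps and rename `hybrid_accuracy_confidence` to `multi_head_fitness`. For readability, here is a minimal sketch of the resulting NumPy functions, with indentation reconstructed (the flattened diff view drops it) and docstrings abbreviated:

```python
import numpy as np
from scipy.spatial.distance import pdist

def diversity_score(population):
    # Mean pairwise Euclidean distance, normalized by the largest possible
    # distance in the unit hypercube, sqrt(n_features).
    if len(population) < 2:
        return 0

    population = np.nan_to_num(population, nan=0.0)

    distances = pdist(population, metric='euclidean')
    distances = np.maximum(distances, 1e-10)  # clamp replaces the old conditional check

    avg_distance = np.mean(distances)
    if population.shape[1] == 0:
        return 0

    max_possible_distance = np.sqrt(population.shape[1])
    max_possible_distance = max(max_possible_distance, 1e-10)

    return avg_distance / max_possible_distance

def multi_head_fitness(y_true, y_pred, diversity_score, accuracy,
                       alpha=2, beta=1.5, lambda_div=0.05):
    # Accuracy is raised to alpha, the margin term to beta, and diversity
    # enters as a weighted bonus factor.
    incorrect = y_true != y_pred
    margin_loss = np.abs(y_true[incorrect] - y_pred[incorrect]).mean() if np.any(incorrect) else 0

    margin_loss = np.nan_to_num(margin_loss, nan=0.0)
    accuracy = max(accuracy, 1e-10)

    return (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
```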
pyerualjetwork/fitness_functions_cuda.py ADDED
@@ -0,0 +1,85 @@
+ import cupy as cp
+
+ def cupy_pairwise_distances(X):
+ """
+ Pairwise Euclidean distances
+ """
+ r = cp.sum(X * X, 1)
+ r = r.reshape(-1, 1)
+
+ distances = cp.maximum(r - 2 * cp.dot(X, X.T) + r.T, 0.0)
+
+ triu_indices = cp.triu_indices(distances.shape[0], k=1)
+ return cp.sqrt(distances[triu_indices])
+
+
+ def diversity_score(population):
+ """
+ Calculates the diversity score of a population based on the
+ Euclidean distances between individuals in the population.
+
+ :param population: That calculates the diversity score of a population based on the Euclidean distances between
+ individuals in the population
+ :return: The function returns the diversity score,
+ which is a measure of how spread out or diverse the population is in terms of their characteristics.
+ """
+ if len(population) < 2:
+ return 0
+
+ population = cp.nan_to_num(population, nan=0.0)
+
+ distances = cupy_pairwise_distances(population)
+ distances = cp.maximum(distances, 1e-10)
+
+ avg_distance = cp.mean(distances)
+ if population.shape[1] == 0:
+ return 0
+
+ max_possible_distance = cp.sqrt(population.shape[1])
+ max_possible_distance = float(max(max_possible_distance, 1e-10))
+
+ diversity = float(avg_distance / max_possible_distance)
+ return diversity
+
+
+ def multi_head_fitness(y_true, y_pred, diversity_score, accuracy, alpha=2, beta=1.5, lambda_div=0.05):
+ """
+ The function calculates a fitness score based on accuracy, margin loss, and diversity score using
+ specified parameters.
+
+ @param y_true The `y_true` parameter represents the true labels of the data points. It is an array
+ or list containing the actual labels of the data points.
+ @param y_pred The `y_pred` parameter in the `multi_head_fitness` function represents the
+ predicted values for a given dataset. It is a NumPy array containing the predicted values for each
+ sample in the dataset.
+ @param diversity_score The `diversity_score` parameter in the `multi_head_fitness` function
+ represents a measure of diversity in the predictions. It is used as a factor in calculating the
+ fitness of the model. The function combines accuracy, margin loss, and diversity score to evaluate
+ the overall performance of the model
+ @param accuracy Accuracy is a measure of the correct predictions made by a model. It is typically
+ calculated as the number of correct predictions divided by the total number of predictions. In the
+ context of the `multi_head_fitness` function, the accuracy parameter represents the accuracy
+ of the model's predictions.
+ @param alpha Alpha is a parameter that controls the impact of accuracy on the overall fitness score
+ in the multi_head_fitness function. It is used as an exponent to raise the accuracy value
+ to, influencing its contribution to the fitness calculation.
+ @param beta The `beta` parameter in the `multi_head_fitness` function is used as an exponent
+ in the fitness calculation formula. It controls the impact of the margin loss term on the overall
+ fitness value. A higher value of `beta` will amplify the effect of the margin loss term, making it
+ @param lambda_div The `lambda_div` parameter in the `multi_head_fitness` function represents
+ the weight given to the diversity score in calculating the fitness value. It is a hyperparameter
+ that controls the impact of diversity score on the overall fitness calculation. A higher value of
+ `lambda_div` will increase the importance
+ @return The function `multi_head_fitness` returns the fitness value calculated based on the
+ input parameters `y_true`, `y_pred`, `diversity_score`, `accuracy`, and optional parameters `alpha`,
+ `beta`, and `lambda_div`. The fitness value is computed using a formula that combines accuracy,
+ margin loss, and diversity score with specified weights and coefficients.
+ """
+ incorrect = y_true != y_pred
+ margin_loss = cp.abs(y_true[incorrect] - y_pred[incorrect]).mean() if cp.any(incorrect) else 0
+
+ margin_loss = cp.nan_to_num(margin_loss, nan=0.0)
+ accuracy = max(accuracy, 1e-10)
+
+ fitness = (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
+ return fitness
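Since `cp.triu_indices(n, k=1)` walks the upper triangle in row-major order, `cupy_pairwise_distances` emits its condensed distance vector in the same ordering as SciPy's `pdist`, which allows a quick equivalence check. A sketch, assuming a CUDA-capable device and the `cupy` and `scipy` packages:

```python
import cupy as cp
import numpy as np
from scipy.spatial.distance import pdist

X_cpu = np.random.rand(6, 4).astype(np.float32)
X_gpu = cp.asarray(X_cpu)

ref = pdist(X_cpu, metric='euclidean')            # condensed distances on the CPU
got = cp.asnumpy(cupy_pairwise_distances(X_gpu))  # same ordering, computed on the GPU

assert np.allclose(ref, got, atol=1e-5)
```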
pyerualjetwork/plan.py CHANGED
@@ -21,7 +21,6 @@ import numpy as np
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations import normalization, batcher
  from .activation_functions import apply_activation, all_activations
- from .metrics import metrics
  from .model_operations import get_acc, get_preds, get_preds_softmax
  from .memory_operations import optimize_labels
  from .visualizations import (
@@ -171,8 +170,8 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc

  """

- from .fitness_functions import diversity_score
- from .planeat import define_genomes
+ from fitness_functions import diversity_score
+ from planeat import define_genomes

  data = 'Train'

@@ -206,10 +205,11 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  fit_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=74, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
  weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
+ div_score = diversity_score(weight_pop)

  else:
  weight_pop = [0] * pop_size
@@ -238,12 +238,12 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  act_pop[j] = activation_potentiation[j]
  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W
+ div_score = diversity_score(weight_pop)

  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]

- div_score = diversity_score(W)
- fit_score = fitness(y_train_batch, model[get_preds_softmax()], div_score, acc)
+ fit_score = fitness(y_train_batch, model[get_preds()], div_score[j], acc)

  target_pop.append(fit_score)

@@ -251,7 +251,8 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc

  best_acc = acc
  best_fit = fit_score
- best_weights = np.copy(weight_pop[j])
+ best_div_score = div_score[j]
+ best_weight = np.copy(weight_pop[j])
  final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]

  best_model = model
@@ -266,7 +267,7 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  print(f", Current Activations={final_activations}", end='')

  if batch_size == 1:
- postfix_dict[f"{data} Fitness"] = np.round(fit_score, 4)
+ postfix_dict["Fitness"] = np.round(fit_score, 4)
  progress.set_postfix(postfix_dict)
  best_fit = fit_score

@@ -278,7 +279,7 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc

  if neurons_history:
  viz_objects['neurons']['artists'] = (
- update_neuron_history_for_learner(np.copy(best_weights), viz_objects['neurons']['ax'],
+ update_neuron_history_for_learner(np.copy(best_weight), viz_objects['neurons']['ax'],
  viz_objects['neurons']['row'], viz_objects['neurons']['col'],
  y_train[0], viz_objects['neurons']['artists'],
  data=data, fig1=viz_objects['neurons']['fig'],
@@ -286,7 +287,7 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  )

  if neural_web_history:
- art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
+ art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
  G=viz_objects['web']['G'], return_objs=True)
  art5_list = [art5_1] + [art5_2] + list(art5_3.values())
  viz_objects['web']['artists'].append(art5_list)
@@ -294,38 +295,36 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  # Check target accuracy
  if target_acc is not None and best_acc >= target_acc:
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)

- div_score = diversity_score(W)
- fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
  print(f'Fitness Value: ', fit_score, '\n')

  postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
- postfix_dict[f"{data} Fitness"] = np.round(best_fit, 4)
+ postfix_dict["Fitness"] = np.round(best_fit, 4)
  progress.set_postfix(postfix_dict)
  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
  fit_score, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ return best_weight, best_model[get_preds()], best_acc, final_activations

  progress.update(1)

  if batch_size != 1:
- train_model = evaluate(x_train, y_train, best_weights, final_activations)
-
- div_score = diversity_score(W)
- fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+ train_model = evaluate(x_train, y_train, best_weight, final_activations)
+
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
  print(f'Fitness Value: ', fit_score, '\n')
-
+
  postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
- postfix_dict[f"{data} Fitness"] = np.round(best_fit, 4)
+ postfix_dict["Fitness"] = np.round(best_fit, 4)
  progress.set_postfix(postfix_dict)

  best_acc_per_gen_list.append(train_model[get_acc()])
@@ -336,42 +335,41 @@ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batc
  fit_list.append(best_fit)

  weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
+ div_score = diversity_score(weight_pop)
  target_pop = []

  # Early stopping check
  if early_stop == True and i > 0:
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)
-
- div_score = diversity_score(W)
- fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
  print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
  fit_score, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ return best_weight, best_model[get_preds()], best_acc, final_activations

  # Final evaluation
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)

- div_score = diversity_score(W)
- fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
  print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, fit_score, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, fit_score, y_train, interval)
+ return best_weight, best_model[get_preds()], best_acc, final_activations


  def evaluate(
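With the strategy machinery gone, the CPU `learner` now optimizes the population against a user-supplied `fitness` callable and returns `(best_weight, preds, best_acc, final_activations)`. A usage sketch based on the docstrings in this diff; the `planeat.evolver` wrapper is the optimizer form the library's documentation uses, and the hyperparameters and toy data are illustrative, not recommendations:

```python
import numpy as np
from pyerualjetwork import plan, planeat, fitness_functions

# toy data: 100 samples, 4 features, 3 one-hot classes
x_train = np.random.rand(100, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 100)]

fitness = lambda *args, **kwargs: fitness_functions.multi_head_fitness(
    *args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)

optimizer = lambda *args, **kwargs: planeat.evolver(*args, **kwargs)

best_weight, preds, best_acc, final_activations = plan.learner(
    x_train, y_train, optimizer, fitness, gen=15, batch_size=0.05)
```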
pyerualjetwork/plan_cuda.py CHANGED
@@ -20,9 +20,7 @@ import cupy as cp
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations_cuda import normalization
- from .loss_functions_cuda import binary_crossentropy, categorical_crossentropy
  from .activation_functions_cuda import apply_activation, all_activations
- from .metrics_cuda import metrics
  from .model_operations_cuda import get_acc, get_preds, get_preds_softmax
  from .memory_operations import transfer_to_gpu, transfer_to_cpu, optimize_labels
  from .visualizations_cuda import (
@@ -82,10 +80,10 @@ def fit(
  return normalization(weight, dtype=dtype)


- def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batch_size=1, pop_size=None,
  neural_web_history=False, show_current_activations=False, auto_normalization=False,
- neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
- interval=33.33, target_acc=None, target_loss=None,
+ neurons_history=False, early_stop=False, show_history=False,
+ interval=33.33, target_acc=None,
  start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
@@ -118,10 +116,25 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  batch_size=0.05,
  interval=16.67)
  ```
- fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True

- strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
+ fitness (function): Fitness function. The fitness_functions module provides 'multi_head_fitness'. Import it with: from pyerualjetwork import fitness_functions, then wrap it in a lambda that fixes your hyperparameters (for example alpha=2, beta=1.5, lambda_div=0.05). Example:
+ ```python
+ fitness = lambda *args, **kwargs: fitness_functions.multi_head_fitness(*args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)
+
+ model = plan.learner(x_train,
+ y_train,
+ optimizer,
+ fitness,
+ fit_start=True,
+ show_history=True,
+ gen=15,
+ batch_size=0.05,
+ interval=16.67)
+ ```

+ fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
+
  gen (int, optional): The generation count for genetic optimization.

  batch_size (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire test set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (%100 of train)
@@ -134,15 +147,11 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  show_history (bool, optional): If True, displays the training history after optimization. Default is False.

- loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
-
  auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

  interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

  target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.
-
- target_loss (float, optional): The target loss to stop training early when achieved. Default is None.

  start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None

@@ -161,6 +170,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  """

+ from .fitness_functions_cuda import diversity_score
  from .planeat_cuda import define_genomes

  data = 'Train'
@@ -189,7 +199,6 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  else:
  raise ValueError("memory parameter must be 'cpu' or 'gpu'.")

- if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

@@ -198,18 +207,17 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  # Initialize variables
  best_acc = 0
- best_f1 = 0
- best_recall = 0
- best_precision = 0
+ best_fit = 0
  best_acc_per_gen_list = []
  postfix_dict = {}
- loss_list = []
+ fit_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=76, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
  weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
+ div_score = diversity_score(weight_pop)

  else:
  weight_pop = [0] * pop_size
@@ -241,40 +249,21 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  act_pop[j] = activation_potentiation[j]
  W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W
-
+ div_score = diversity_score(weight_pop)
+
  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]
-
- if strategy == 'accuracy': target_pop.append(acc)
-
- elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
- precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
-
- if strategy == 'precision':
- target_pop.append(precision_score)
-
- if i == 0 and j == 0:
- best_precision = precision_score
-
- if strategy == 'recall':
- target_pop.append(recall_score)
-
- if i == 0 and j == 0:
- best_recall = recall_score
+
+ fit_score = fitness(y_train_batch, model[get_preds()], div_score[j], acc)

- if strategy == 'f1':
- target_pop.append(f1_score)
+ target_pop.append(fit_score)

- if i == 0 and j == 0:
- best_f1 = f1_score
-
- if ((strategy == 'accuracy' and acc >= best_acc) or
- (strategy == 'f1' and f1_score >= best_f1) or
- (strategy == 'precision' and precision_score >= best_precision) or
- (strategy == 'recall' and recall_score >= best_recall)):
+ if fit_score >= best_fit:

  best_acc = acc
- best_weights = cp.copy(weight_pop[j])
+ best_fit = fit_score
+ best_div_score = div_score[j]
+ best_weight = cp.copy(weight_pop[j])
  final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]

  best_model = model
@@ -287,34 +276,29 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  if show_current_activations:
  print(f", Current Activations={final_activations}", end='')
-
- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train_batch, dtype=y_train_batch.dtype), y_pred_batch=model[get_preds_softmax()])

  if batch_size == 1:
- postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
- best_loss = train_loss
+ postfix_dict["Fitness"] = cp.round(fit_score, 4)
  progress.set_postfix(postfix_dict)
+ best_fit = fit_score

  # Update visualizations during training
  if show_history:
  gen_list = range(1, len(best_acc_per_gen_list) + 2)
- update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+ update_history_plots_for_learner(viz_objects, gen_list, fit_list + [fit_score],
  best_acc_per_gen_list + [best_acc], x_train, final_activations)

  if neurons_history:
  viz_objects['neurons']['artists'] = (
- update_neuron_history_for_learner(cp.copy(best_weights), viz_objects['neurons']['ax'],
+ update_neuron_history_for_learner(cp.copy(best_weight), viz_objects['neurons']['ax'],
  viz_objects['neurons']['row'], viz_objects['neurons']['col'],
  y_train[0], viz_objects['neurons']['artists'],
  data=data, fig1=viz_objects['neurons']['fig'],
- acc=best_acc, loss=train_loss)
+ acc=best_acc, loss=fit_score)
  )

  if neural_web_history:
- art5_1, art5_2, art5_3 = draw_neural_web(W=best_weights, ax=viz_objects['web']['ax'],
+ art5_1, art5_2, art5_3 = draw_neural_web(W=best_weight, ax=viz_objects['web']['ax'],
  G=viz_objects['web']['G'], return_objs=True)
  art5_list = [art5_1] + [art5_2] + list(art5_3.values())
  viz_objects['web']['artists'].append(art5_list)
@@ -322,111 +306,77 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  # Check target accuracy
  if target_acc is not None and best_acc >= target_acc:
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
-
- # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
-
- # Check target loss
- if target_loss is not None and best_loss <= target_loss:
- progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
- activation_potentiation=final_activations)
-
- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
-
- print('\nActivations: ', final_activations)
- print(f'Train Accuracy: ', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
-
- # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ print(f'Fitness Value: ', fit_score, '\n')
+
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+ fit_score, y_train, interval)
+ return best_weight, best_model[get_preds()], best_acc, final_activations

  progress.update(1)

  if batch_size != 1:
- train_model = evaluate(x_train, y_train, best_weights, final_activations)
-
- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=transfer_to_gpu(y_train, dtype=y_train.dtype), y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=transfer_to_gpu(y_train, dtype=y_train.dtype), y_pred_batch=train_model[get_preds_softmax()])
+ train_model = evaluate(x_train, y_train, best_weight, final_activations)

+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])
+
+ print('\nActivations: ', final_activations)
+ print(f'Train Accuracy:', train_model[get_acc()])
+ print(f'Fitness Value: ', fit_score, '\n')
+
  postfix_dict[f"{data} Accuracy"] = cp.round(train_model[get_acc()], 4)
- postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
+ postfix_dict["Fitness"] = cp.round(best_fit, 4)
  progress.set_postfix(postfix_dict)
-
+
  best_acc_per_gen_list.append(train_model[get_acc()])
- loss_list.append(train_loss)
+ fit_list.append(fit_score)

  else:
  best_acc_per_gen_list.append(best_acc)
- loss_list.append(best_loss)
+ fit_list.append(best_fit)

  weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
+ div_score = diversity_score(weight_pop)
  target_pop = []

  # Early stopping check
  if early_stop == True and i > 0:
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
+ print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc,
+ fit_score, y_train, interval)
+ return best_weight, best_model[get_preds()], best_acc, final_activations

  # Final evaluation
  progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
+ train_model = evaluate(x_train, y_train, W=best_weight,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+ fit_score = fitness(y_train, train_model[get_preds()], best_div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
- print(f'Train Accuracy: ', train_model[get_acc()])
- print(f'Train Loss : ', train_loss, '\n')
+ print(f'Train Accuracy:', train_model[get_acc()])
+ print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
+ display_visualizations_for_learner(viz_objects, best_weight, data, best_acc, fit_score, y_train, interval)
+ return best_weight, best_model[get_preds()], best_acc, final_activations

  def evaluate(
  x_test,
@@ -456,4 +406,4 @@ def evaluate(
  accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
  softmax_preds = cp.exp(result) / (cp.sum(cp.exp(result), axis=1, keepdims=True) + epsilon)

- return W, None, accuracy, None, None, softmax_preds
+ return W, result, accuracy, None, None, softmax_preds
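The final hunk is load-bearing: the CUDA `learner` now scores fitness on `train_model[get_preds()]`, and before this change `evaluate` returned `None` in that slot. A sketch of the implied 6-tuple layout (indices inferred from how `get_acc()` and `get_preds_softmax()` are used; treat them as assumptions, not documented API):

```python
import cupy as cp

# Stand-in for the 6-tuple the CUDA evaluate() returns after this diff
# (field order read directly from the `return` statement above):
model = (None,              # 0: W (weights)
         cp.zeros((4, 3)),  # 1: result -- raw outputs; was None before this diff
         0.75,              # 2: accuracy
         None, None,        # 3, 4: unused slots
         cp.zeros((4, 3)))  # 5: softmax_preds

PREDS_INDEX = 1  # assumed value of get_preds(); inferred from the tuple order
# learner() reads model[get_preds()]; with `result` in slot 1 that slot is no
# longer None, which is what makes the new fitness(...) call work on the GPU path.
assert model[PREDS_INDEX] is not None
```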
pyerualjetwork/planeat.py CHANGED
@@ -560,11 +560,11 @@ def cross_over(first_parent_W,
  selection_bias = random.uniform(0, 1)

  if fitness_bias > selection_bias:
- row_cut_start = math.floor(row_cut_start * (succes + epsilon))
- row_cut_end = math.ceil(row_cut_end * (succes + epsilon))
+ row_cut_start = math.floor(row_cut_start * succes)
+ row_cut_end = math.ceil(row_cut_end * succes)

- col_cut_start = math.floor(col_cut_start * (succes + epsilon))
- col_cut_end = math.ceil(col_cut_end * (succes + epsilon))
+ col_cut_start = math.floor(col_cut_start * succes)
+ col_cut_end = math.ceil(col_cut_end * succes)

  child_W = dominant_parent_W

@@ -590,9 +590,10 @@ def cross_over(first_parent_W,
  random_undominant_activation = undominant_parent_act[random_index]

  child_act.append(random_undominant_activation)
-
+ new_threshold += threshold
+
  if len(dominant_parent_act) > new_threshold:
- new_threshold += threshold
+ pass

  else:
  break
@@ -612,9 +613,10 @@ def cross_over(first_parent_W,
  random_undominant_activation = undominant_parent_act[random_index_undominant]

  child_act[random_index_dominant] = random_undominant_activation
-
+ new_threshold += threshold
+
  if len(dominant_parent_act) > new_threshold:
- new_threshold += threshold
+ pass

  else:
  break
@@ -690,7 +692,7 @@ def mutation(weight,

  max_threshold = row_end * col_end

- threshold = weight_mutate_threshold * genome_fitness
+ threshold = weight_mutate_threshold * (genome_fitness + epsilon)
  new_threshold = threshold

  for _ in range(max_threshold):
pyerualjetwork/planeat_cuda.py CHANGED
@@ -694,7 +694,7 @@ def mutation(weight,

  max_threshold = row_end * col_end

- threshold = weight_mutate_threshold * genome_fitness
+ threshold = weight_mutate_threshold * (genome_fitness + epsilon)
  new_threshold = threshold

  for _ in range(max_threshold):
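The epsilon added in both `mutation` implementations guards the degenerate case `genome_fitness == 0`: the mutation threshold scales with fitness, and a zero threshold would leave the `new_threshold` cursor stuck (the same `+= threshold` stepping is visible in `cross_over` above). A small arithmetic sketch, with an assumed epsilon magnitude:

```python
weight_mutate_threshold = 16
genome_fitness = 0.0
epsilon = 1e-7  # assumed magnitude; the module defines its own constant

old_threshold = weight_mutate_threshold * genome_fitness              # 0.0 -> cursor never advances
new_threshold = weight_mutate_threshold * (genome_fitness + epsilon)  # tiny but positive

assert old_threshold == 0.0 and new_threshold > 0.0
```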
pyerualjetwork/visualizations.py CHANGED
@@ -754,9 +754,9 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac

  hist = viz_objects['history']

- # Loss plot
- art1 = hist['ax'][0].plot(depth_list, loss_list, color='r', markersize=6, linewidth=2)
- hist['ax'][0].set_title('Train Loss Over Gen')
+ # Fitness plot
+ art1 = hist['ax'][0].plot(depth_list, loss_list, color='m', markersize=6, linewidth=2)
+ hist['ax'][0].set_title('Fitness Score Over Gen')
  hist['artist1'].append(art1)

  # Accuracy plot
@@ -771,7 +771,7 @@
  translated_x_train += draw_activations(x, activation)

  art3 = hist['ax'][2].plot(x, translated_x_train, color='b', markersize=6, linewidth=2)
- hist['ax'][2].set_title('Potentiation Shape Over Gen')
+ hist['ax'][2].set_title('Activation Shape Over Gen')
  hist['artist3'].append(art3)

  def display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, test_loss, y_train, interval):
pyerualjetwork/visualizations_cuda.py CHANGED
@@ -748,13 +748,12 @@ def update_history_plots_for_learner(viz_objects, depth_list, loss_list, best_ac
  for i in range(len(loss_list)):
  loss_list[i] = loss_list[i].get()

- # Loss plot
- art1 = hist['ax'][0].plot(depth_list, loss_list, color='r', markersize=6, linewidth=2)
- hist['ax'][0].set_title('Train Loss Over Gen')
+ # Fitness plot
+ art1 = hist['ax'][0].plot(depth_list, loss_list, color='m', markersize=6, linewidth=2)
+ hist['ax'][0].set_title('Fitness Score Over Gen')
  hist['artist1'].append(art1)

  # Accuracy plot
-
  for i in range(len(best_acc_per_depth_list)):
  best_acc_per_depth_list[i] = best_acc_per_depth_list[i].get()
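One note on the `.get()` calls kept in this CUDA variant: Matplotlib needs host-side NumPy arrays, and `cupy.ndarray.get()` performs the device-to-host copy, so the fitness and accuracy histories are moved off the GPU before plotting. For example:

```python
import cupy as cp

fit_history = cp.asarray([0.12, 0.34, 0.56])  # per-generation scores on the GPU
fit_history_host = fit_history.get()          # numpy.ndarray, safe for matplotlib
print(type(fit_history_host).__module__)      # -> numpy
```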
pyerualjetwork-4.3.10b0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.9.dev2
+ Version: 4.3.10b0
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
@@ -43,6 +43,7 @@ YouTube Tutorials: https://www.youtube.com/watch?v=6wMQstZ00is&list=PLNgNWpM7Hbs
  'tqdm==4.66.4',
  'pandas==2.2.2',
  'networkx==3.3',
+ 'seaborn==0.13.2',
  'numpy==1.26.4',
  'matplotlib==3.9.0',
  'colorama==0.4.6',
pyerualjetwork-4.3.10b0.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+ pyerualjetwork/__init__.py,sha256=PeTvh1FsdWQz17kIzq4JB4kuMWgHJ4NpNKMHQcOwPAk,642
+ pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
+ pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
+ pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
+ pyerualjetwork/data_operations_cuda.py,sha256=ZcjmLXE1-HVwedextYdJZ1rgrns1OfSekzFpr1a9m6o,17625
+ pyerualjetwork/fitness_functions.py,sha256=kewx8yMtQFUO-9rjCD3n5z26X2vLeDsb0HPTIdizFF0,4084
+ pyerualjetwork/fitness_functions_cuda.py,sha256=ouUh8hhEvEcATBuKg2gOBuqEOyJoygHORC851OUZrO8,4360
+ pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
+ pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
+ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
+ pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
+ pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
+ pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
+ pyerualjetwork/plan.py,sha256=DWWrvE9sLxx0xBQ2JbmFmetWuPRC92hmxI0OL6aIEuA,20341
+ pyerualjetwork/plan_cuda.py,sha256=mF6ukdUc7uSdAHEiNQAQIEMebFRR7tUXtePq3IqeEpw,20858
+ pyerualjetwork/planeat.py,sha256=QyfPW5k0rvVnowtymI3LXLKp39y69M5DIyNBAeKIhRQ,37568
+ pyerualjetwork/planeat_cuda.py,sha256=zzsEu612Ren5K8YyvIn00RjYBG9Z6AOikiReGAPrBbg,37624
+ pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
+ pyerualjetwork/visualizations.py,sha256=6uY9U4qGOlG4aIhGdAFcTm2Z5m9Hxww4tAAG61_Vqb8,28294
+ pyerualjetwork/visualizations_cuda.py,sha256=o6MyZUVz040lmdI2kGlOEU2IoxvyWJPhSOKe9vkLMvY,28753
+ pyerualjetwork-4.3.10b0.dist-info/METADATA,sha256=HBNJhe_U2XRfN7QJhiznxsj1Wargl3hQcz4aTkzUC1k,7499
+ pyerualjetwork-4.3.10b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.10b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.3.10b0.dist-info/RECORD,,
pyerualjetwork/loss_functions_cuda.py DELETED
@@ -1,21 +0,0 @@
-
- import cupy as cp
-
- def categorical_crossentropy(y_true_batch, y_pred_batch):
- epsilon = 1e-7
- y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
-
- losses = -cp.sum(y_true_batch * cp.log(y_pred_batch), axis=1)
-
- mean_loss = cp.mean(losses)
- return mean_loss
-
-
- def binary_crossentropy(y_true_batch, y_pred_batch):
- epsilon = 1e-7
- y_pred_batch = cp.clip(y_pred_batch, epsilon, 1. - epsilon)
-
- losses = -cp.mean(y_true_batch * cp.log(y_pred_batch) + (1 - y_true_batch) * cp.log(1 - y_pred_batch), axis=1)
-
- mean_loss = cp.mean(losses)
- return mean_loss
pyerualjetwork-4.3.9.dev2.dist-info/RECORD DELETED
@@ -1,24 +0,0 @@
- pyerualjetwork/__init__.py,sha256=9eOTCzpFVUX9DY_9CL8Cw_Z88H6-tD0Jnw8a-gKKiWg,643
- pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
- pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
- pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
- pyerualjetwork/data_operations_cuda.py,sha256=ZcjmLXE1-HVwedextYdJZ1rgrns1OfSekzFpr1a9m6o,17625
- pyerualjetwork/fitness_functions.py,sha256=0tj1Ri8x8PXR9cuZF_PaUF3mjgsys0IRAEKklipKxZA,4064
- pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
- pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
- pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
- pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
- pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
- pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
- pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
- pyerualjetwork/plan.py,sha256=vps6EHyHoFWBlp3g5BF3ygD4vIkFvvRsmB7xS0IJ70E,20452
- pyerualjetwork/plan_cuda.py,sha256=DCKWStOZhBq-NYKA2BQTKe3YgvDYuGtujtIr56ZBjiw,24477
- pyerualjetwork/planeat.py,sha256=OxwSjfSFPwh7kVrhnuAkr8Lrk73GB-Wk2ajUFIfZcbQ,37556
- pyerualjetwork/planeat_cuda.py,sha256=aTdBmhJeIKC58pssODtqVsdOtJP7W6TRmVyGHp7k_CM,37612
- pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
- pyerualjetwork/visualizations.py,sha256=08O5uEewuYiovZRX1uHWEHjn19LcnhndWYvqVN74xs0,28290
- pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
- pyerualjetwork-4.3.9.dev2.dist-info/METADATA,sha256=Z0nThYRtlJKsScG6ZRZYdPcBPU_GfIcxtE8oz1nFVRI,7479
- pyerualjetwork-4.3.9.dev2.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.9.dev2.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.3.9.dev2.dist-info/RECORD,,
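Finally, a quick way to confirm which side of this diff is installed (assuming the wheel was installed with pip):

```python
import pyerualjetwork

# prints "4.3.10b0" for the new wheel, "4.3.9dev2" for the old one
print(pyerualjetwork.__version__)
```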