pyerualjetwork 4.3.9b7__py3-none-any.whl → 4.3.9.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.9b7"
+ __version__ = "4.3.9dev1"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
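
Note on the version strings: `__version__` is set to "4.3.9dev1" while the wheel metadata and dist-info paths further down use "4.3.9.dev1". Both spellings normalize to the same PEP 440 development release, which is why the registry shows the dotted form. A quick check (a sketch, assuming the third-party `packaging` library is installed):

```python
from packaging.version import Version

# PEP 440 normalization inserts the missing separator before "dev",
# so both spellings denote the same development release.
assert Version("4.3.9dev1") == Version("4.3.9.dev1")
print(Version("4.3.9dev1"))  # prints the normalized form: 4.3.9.dev1
```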
pyerualjetwork/fitness_functions.py ADDED
@@ -0,0 +1,67 @@
+ import numpy as np
+ from scipy.spatial.distance import pdist
+
+ def diversity_score(population):
+ """
+ Calculates the diversity score of a population based on the
+ Euclidean distances between individuals in the population.
+
+ :param population: 2D array in which each row is an individual and each
+ column one of its characteristics
+ :return: The diversity score, a measure of how spread out or diverse
+ the population is in terms of its characteristics.
+
+ """
+
+ if len(population) < 2:
+ return 0
+
+ distances = pdist(population, metric='euclidean')
+
+ avg_distance = np.mean(distances)
+
+ max_possible_distance = np.sqrt(population.shape[1])
+ diversity = avg_distance / max_possible_distance
+
+ return diversity
+
+
+ def hybrid_accuracy_confidence(y_true, y_pred, diversity_score, accuracy, alpha=2, beta=1.5, lambda_div=0.05):
+ """
+ The function calculates a fitness score based on accuracy, margin loss, and diversity score using
+ specified parameters.
+
+ @param y_true The `y_true` parameter represents the true labels of the data points, as an array
+ or list containing the actual labels.
+ @param y_pred The `y_pred` parameter is a NumPy array containing the predicted values for each
+ sample in the dataset.
+ @param diversity_score The `diversity_score` parameter represents a measure of diversity in the
+ population and is used as a factor in the fitness calculation. The function combines accuracy,
+ margin loss, and diversity score to evaluate the overall performance of the model.
+ @param accuracy Accuracy is a measure of the correct predictions made by a model, typically
+ calculated as the number of correct predictions divided by the total number of predictions.
+ @param alpha Alpha is an exponent applied to the accuracy value, controlling the impact of
+ accuracy on the overall fitness score.
+ @param beta The `beta` parameter is used as an exponent on the margin-loss term in the fitness
+ formula. A higher value of `beta` amplifies the effect of the margin loss, penalizing
+ low-confidence mistakes more harshly.
+ @param lambda_div The `lambda_div` parameter is the weight given to the diversity score in the
+ fitness value. A higher value of `lambda_div` increases the importance of population diversity
+ in the overall fitness calculation.
+ @return The fitness value computed from `y_true`, `y_pred`, `diversity_score`, and `accuracy`,
+ using a formula that combines accuracy, margin loss, and diversity score with the weights
+ `alpha`, `beta`, and `lambda_div`.
+ """
+ incorrect = y_true != y_pred
+ margin_loss = np.abs(y_true[incorrect] - y_pred[incorrect]).mean() if np.any(incorrect) else 0
+
+ fitness = (accuracy ** alpha) * ((1 - margin_loss) ** beta) * (1 + lambda_div * diversity_score)
+
+ return fitness
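
Taken together, the two new functions score a candidate solution: `diversity_score` summarizes how spread out a weight population is, and `hybrid_accuracy_confidence` folds that into an accuracy- and confidence-weighted fitness. A minimal usage sketch (the toy arrays and shapes below are assumptions, not package examples; population values are kept in [0, 1] so that `sqrt(n_features)` is a sensible distance ceiling):

```python
import numpy as np
from pyerualjetwork import fitness_functions

# Hypothetical population: 8 flattened weight vectors, 10 features each.
population = np.random.rand(8, 10)
div = fitness_functions.diversity_score(population)

# Hypothetical targets and predictions, compared element-wise exactly as
# hybrid_accuracy_confidence does internally.
y_true = np.array([0.0, 1.0, 1.0, 0.0])
y_pred = np.array([0.1, 0.9, 0.4, 0.0])
accuracy = 0.75  # supplied by the caller; the function does not recompute it

fit = fitness_functions.hybrid_accuracy_confidence(y_true, y_pred, div, accuracy,
                                                   alpha=2, beta=1.5, lambda_div=0.05)
print(fit)
```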
pyerualjetwork/plan.py CHANGED
@@ -20,7 +20,6 @@ import numpy as np
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
  from .data_operations import normalization, batcher
- from .loss_functions import binary_crossentropy, categorical_crossentropy
  from .activation_functions import apply_activation, all_activations
  from .metrics import metrics
  from .model_operations import get_acc, get_preds, get_preds_softmax
@@ -83,10 +82,10 @@ def fit(
  return normalization(weight, dtype=dtype)


- def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
+ def learner(x_train, y_train, optimizer, fitness, fit_start=True, gen=None, batch_size=1, pop_size=None,
  neural_web_history=False, show_current_activations=False, auto_normalization=False,
- neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
- interval=33.33, target_acc=None, target_loss=None,
+ neurons_history=False, early_stop=False, show_history=False,
+ interval=33.33, target_acc=None,
  start_this_act=None, start_this_W=None, dtype=np.float32):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
@@ -104,14 +103,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  optimizer (function): PLAN optimization technique with hyperparameters. (PLAN using NEAT(PLANEAT) for optimization.) Please use this: from pyerualjetwork import planeat (and) optimizer = lambda *args, **kwargs: planeat.evolver(*args, 'here give your neat hyperparameters, for example: activation_add_prob=0.85', **kwargs) Example:
  ```python
- genetic_optimizer = lambda *args, **kwargs: planeat.evolver(*args,
- activation_add_prob=0.85,
- strategy='aggressive',
- **kwargs)
+ optimizer = lambda *args, **kwargs: planeat.evolver(*args,
+ activation_add_prob=0.85,
+ strategy='aggressive',
+ **kwargs)

  model = plan.learner(x_train,
  y_train,
- optimizer=genetic_optimizer,
+ optimizer,
+ fitness,
  fit_start=True,
  strategy='accuracy',
  show_history=True,
@@ -120,9 +120,23 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  interval=16.67)
  ```

- fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True
+ fitness (function): Fitness function. The fitness_functions module provides the 'hybrid_accuracy_confidence' function. Please use this: from pyerualjetwork import fitness_functions (and) fitness = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, 'here give your fitness function hyperparameters, for example: alpha=2, beta=1.5, lambda_div=0.05', **kwargs) Example:
+ ```python
+ fitness = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(*args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)

- strategy (str, optional): Learning strategy. (options: 'accuracy', 'f1', 'precision', 'recall'): 'accuracy', Maximizes train (or test if given) accuracy during learning. 'f1', Maximizes train (or test if given) f1 score during learning. 'precision', Maximizes train (or test if given) precision score during learning. 'recall', Maximizes train (or test if given) recall during learning. Default is 'accuracy'.
+ model = plan.learner(x_train,
+ y_train,
+ optimizer,
+ fitness,
+ fit_start=True,
+ show_history=True,
+ gen=15,
+ batch_size=0.05,
+ interval=16.67)
+ ```
+
+ fit_start (bool, optional): If the fit_start parameter is set to True, the initial generation population undergoes a simple short training process using the PLAN algorithm. This allows for a very robust starting point, especially for large and complex datasets. However, for small or relatively simple datasets, it may result in unnecessary computational overhead. When fit_start is True, completing the first generation may take slightly longer (this increase in computational cost applies only to the first generation and does not affect subsequent generations). If fit_start is set to False, the initial population will be entirely random. Options: True or False. Default: True

  gen (int, optional): The generation count for genetic optimization.

@@ -138,14 +152,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  auto_normalization (bool, optional): Normalization may solve overflow problems. Default: False

- loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
-
  interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

  target_acc (int, optional): The target accuracy to stop training early when achieved. Default is None.

- target_loss (float, optional): The target loss to stop training early when achieved. Default is None.
-
  start_this_act (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None

  start_this_W (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
@@ -161,6 +171,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  """

+ from .fitness_functions import diversity_score
  from .planeat import define_genomes

  data = 'Train'
@@ -181,7 +192,6 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  if gen is None:
  gen = activation_potentiation_len

- if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
@@ -190,12 +200,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge

  # Initialize variables
  best_acc = 0
- best_f1 = 0
- best_recall = 0
- best_precision = 0
+ best_fit = 0
  best_acc_per_gen_list = []
  postfix_dict = {}
- loss_list = []
+ fit_list = []
  target_pop = []

  progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)
@@ -234,35 +242,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
  acc = model[get_acc()]

- if strategy == 'accuracy': target_pop.append(acc)
-
- elif strategy == 'f1' or strategy == 'precision' or strategy == 'recall':
- precision_score, recall_score, f1_score = metrics(y_train_batch, model[get_preds()])
-
- if strategy == 'precision':
- target_pop.append(precision_score)
-
- if i == 0 and j == 0:
- best_precision = precision_score
-
- if strategy == 'recall':
- target_pop.append(recall_score)
-
- if i == 0 and j == 0:
- best_recall = recall_score
+ div_score = diversity_score(W)
+ fit_score = fitness(y_train_batch, model[get_preds_softmax()], div_score, acc)

- if strategy == 'f1':
- target_pop.append(f1_score)
+ target_pop.append(fit_score)

- if i == 0 and j == 0:
- best_f1 = f1_score
-
- if ((strategy == 'accuracy' and acc >= best_acc) or
- (strategy == 'f1' and f1_score >= best_f1) or
- (strategy == 'precision' and precision_score >= best_precision) or
- (strategy == 'recall' and recall_score >= best_recall)):
+ if fit_score >= best_fit:

  best_acc = acc
+ best_fit = fit_score
  best_weights = np.copy(weight_pop[j])
  final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
@@ -277,20 +265,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  if show_current_activations:
  print(f", Current Activations={final_activations}", end='')

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train_batch, y_pred_batch=model[get_preds_softmax()])
-
  if batch_size == 1:
- postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+ postfix_dict[f"{data} Fitness"] = fit_score
  progress.set_postfix(postfix_dict)
- best_loss = train_loss
+ best_fit = fit_score

  # Update visualizations during training
  if show_history:
  gen_list = range(1, len(best_acc_per_gen_list) + 2)
- update_history_plots_for_learner(viz_objects, gen_list, loss_list + [train_loss],
+ update_history_plots_for_learner(viz_objects, gen_list, fit_list + [fit_score],
  best_acc_per_gen_list + [best_acc], x_train, final_activations)

  if neurons_history:
@@ -299,7 +282,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  viz_objects['neurons']['row'], viz_objects['neurons']['col'],
  y_train[0], viz_objects['neurons']['artists'],
  data=data, fig1=viz_objects['neurons']['fig'],
- acc=best_acc, loss=train_loss)
+ acc=best_acc, loss=fit_score)
  )

  if neural_web_history:
@@ -314,42 +297,19 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  train_model = evaluate(x_train, y_train, W=best_weights,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
+ div_score = diversity_score(W)
+ fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
-
- # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
- return best_weights, best_model[get_preds()], best_acc, final_activations
-
- # Check target loss
- if target_loss is not None and best_loss <= target_loss:
- progress.close()
- train_model = evaluate(x_train, y_train, W=best_weights,
- activation_potentiation=final_activations)
-
- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
-
- print('\nActivations: ', final_activations)
- print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
-
+ print(f'Fitness Value: ', fit_score, '\n')
+
+ postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
+ postfix_dict[f"{data} Fitness"] = best_fit
+ progress.set_postfix(postfix_dict)
  # Display final visualizations
  display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
+ fit_score, y_train, interval)
  return best_weights, best_model[get_preds()], best_acc, final_activations

  progress.update(1)
@@ -357,23 +317,23 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  if batch_size != 1:
  train_model = evaluate(x_train, y_train, best_weights, final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
+ div_score = diversity_score(W)
+ fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])
+
+ print('\nActivations: ', final_activations)
+ print(f'Train Accuracy:', train_model[get_acc()])
+ print(f'Fitness Value: ', fit_score, '\n')

  postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
- postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
+ postfix_dict[f"{data} Fitness"] = best_fit
  progress.set_postfix(postfix_dict)

  best_acc_per_gen_list.append(train_model[get_acc()])
- loss_list.append(train_loss)
+ fit_list.append(fit_score)

  else:
  best_acc_per_gen_list.append(best_acc)
- loss_list.append(best_loss)
+ fit_list.append(best_fit)

  weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
  target_pop = []
@@ -385,20 +345,16 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  train_model = evaluate(x_train, y_train, W=best_weights,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train,
- y_pred_batch=train_model[get_preds_softmax()])
+ div_score = diversity_score(W)
+ fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
+ print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
  display_visualizations_for_learner(viz_objects, best_weights, data, best_acc,
- train_loss, y_train, interval)
+ fit_score, y_train, interval)
  return best_weights, best_model[get_preds()], best_acc, final_activations

  # Final evaluation
@@ -406,17 +362,15 @@ def learner(x_train, y_train, optimizer, fit_start=True, strategy='accuracy', ge
  train_model = evaluate(x_train, y_train, W=best_weights,
  activation_potentiation=final_activations)

- if loss == 'categorical_crossentropy':
- train_loss = categorical_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
- else:
- train_loss = binary_crossentropy(y_true_batch=y_train, y_pred_batch=train_model[get_preds_softmax()])
+ div_score = diversity_score(W)
+ fit_score = fitness(y_train, train_model[get_preds_softmax()], div_score, train_model[get_acc()])

  print('\nActivations: ', final_activations)
  print(f'Train Accuracy:', train_model[get_acc()])
- print(f'Train Loss: ', train_loss, '\n')
+ print(f'Fitness Value: ', fit_score, '\n')

  # Display final visualizations
- display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, train_loss, y_train, interval)
+ display_visualizations_for_learner(viz_objects, best_weights, data, best_acc, fit_score, y_train, interval)
  return best_weights, best_model[get_preds()], best_acc, final_activations

@@ -444,6 +398,8 @@ def evaluate(

  x_test = apply_activation(x_test, activation_potentiation)
  result = x_test @ W.T
- softmax_preds = np.exp(result) / np.sum(np.exp(result), axis=1, keepdims=True); accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
+ epsilon = 1e-10
+ accuracy = (np.argmax(result, axis=1) == np.argmax(y_test, axis=1)).mean()
+ softmax_preds = np.exp(result) / (np.sum(np.exp(result), axis=1, keepdims=True) + epsilon)

  return W, None, accuracy, None, None, softmax_preds
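
The added `epsilon` only protects the division against a zero denominator; `np.exp(result)` itself can still overflow for large logits. A common, mathematically equivalent guard (a sketch, not what the package ships) subtracts the per-row maximum before exponentiating:

```python
import numpy as np

def stable_softmax(result, epsilon=1e-10):
    # Shifting by the row max keeps np.exp in a safe range without
    # changing the softmax output.
    shifted = result - np.max(result, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / (np.sum(exps, axis=1, keepdims=True) + epsilon)
```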
@@ -452,6 +452,8 @@ def evaluate(

  x_test = apply_activation(x_test, activation_potentiation)
  result = x_test @ W.T
- softmax_preds = cp.exp(result) / cp.sum(cp.exp(result), axis=1, keepdims=True); accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
+ epsilon = 1e-10
+ accuracy = (cp.argmax(result, axis=1) == cp.argmax(y_test, axis=1)).mean()
+ softmax_preds = cp.exp(result) / (cp.sum(cp.exp(result), axis=1, keepdims=True) + epsilon)

  return W, None, accuracy, None, None, softmax_preds
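
Putting the plan.py changes together: `learner` now takes both callables positionally and no longer accepts `strategy`, `loss`, or `target_loss`. A complete invocation under the new signature might look like this (a sketch; `x_train`/`y_train` are assumed to be a feature matrix and one-hot labels):

```python
from pyerualjetwork import plan, planeat, fitness_functions

optimizer = lambda *args, **kwargs: planeat.evolver(*args,
                                                    activation_add_prob=0.85,
                                                    strategy='aggressive',
                                                    **kwargs)

fitness = lambda *args, **kwargs: fitness_functions.hybrid_accuracy_confidence(
    *args, alpha=2, beta=1.5, lambda_div=0.05, **kwargs)

# Unpacking follows learner's return statement shown in the diff:
# best_weights, best_model[get_preds()], best_acc, final_activations.
best_W, preds, best_acc, activations = plan.learner(x_train, y_train, optimizer, fitness,
                                                    fit_start=True, gen=15,
                                                    batch_size=0.05, interval=16.67)
```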
{pyerualjetwork-4.3.9b7.dist-info → pyerualjetwork-4.3.9.dev1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.9b7
+ Version: 4.3.9.dev1
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
{pyerualjetwork-4.3.9b7.dist-info → pyerualjetwork-4.3.9.dev1.dist-info}/RECORD RENAMED
@@ -1,24 +1,24 @@
- pyerualjetwork/__init__.py,sha256=9pV3ZfPPqMatL9LwixrmgVVhQ_rTYeIyrTAAutNbiuw,641
+ pyerualjetwork/__init__.py,sha256=TkG_18QqarnCsZ99v_XYRuWoDYGIh2dQErq5tUwJtNU,643
  pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
  pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
  pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
  pyerualjetwork/data_operations_cuda.py,sha256=ZcjmLXE1-HVwedextYdJZ1rgrns1OfSekzFpr1a9m6o,17625
+ pyerualjetwork/fitness_functions.py,sha256=EkBCRWr70Y1yG0bc4styDUDkVxLwJ0wjirLKPh74-9U,3835
  pyerualjetwork/help.py,sha256=nQ_YbYA2RtuafhuvkreNpX0WWL1I_nzlelwCtvei0_Y,775
- pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
  pyerualjetwork/memory_operations.py,sha256=I7QiZ--xSyRkFF0wcckPwZV7K9emEvyx5aJ3DiRHZFI,13468
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
  pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
- pyerualjetwork/plan.py,sha256=mcVxwBus_GwfYjWLMwx8KDc25uJT8l773l-mHCfa0Xk,23558
- pyerualjetwork/plan_cuda.py,sha256=8uEyYdQQX122Hcc-XfFoPSiCeLADt-y-cGX3AuRYPt0,24440
+ pyerualjetwork/plan.py,sha256=p8OX4Qo3vxtKU74eO68I61ZHrKBju1p8sk4Pv6x8e2w,20413
+ pyerualjetwork/plan_cuda.py,sha256=DCKWStOZhBq-NYKA2BQTKe3YgvDYuGtujtIr56ZBjiw,24477
  pyerualjetwork/planeat.py,sha256=OxwSjfSFPwh7kVrhnuAkr8Lrk73GB-Wk2ajUFIfZcbQ,37556
  pyerualjetwork/planeat_cuda.py,sha256=aTdBmhJeIKC58pssODtqVsdOtJP7W6TRmVyGHp7k_CM,37612
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork/visualizations.py,sha256=08O5uEewuYiovZRX1uHWEHjn19LcnhndWYvqVN74xs0,28290
  pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
- pyerualjetwork-4.3.9b7.dist-info/METADATA,sha256=LaPTIsNcmnCw7sl_Q5PrEizpPa6aX236xs62-xnBOYw,7476
- pyerualjetwork-4.3.9b7.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.9b7.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.3.9b7.dist-info/RECORD,,
+ pyerualjetwork-4.3.9.dev1.dist-info/METADATA,sha256=q2sjP-KKIkuv4idsaFXFY1MUu041vHy2jpvIoFmoLI0,7479
+ pyerualjetwork-4.3.9.dev1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.9.dev1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.3.9.dev1.dist-info/RECORD,,
pyerualjetwork/loss_functions.py DELETED
@@ -1,21 +0,0 @@
-
- import numpy as np
-
- def categorical_crossentropy(y_true_batch, y_pred_batch):
- epsilon = 1e-7
- y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
-
- losses = -np.sum(y_true_batch * np.log(y_pred_batch), axis=1)
-
- mean_loss = np.mean(losses)
- return mean_loss
-
-
- def binary_crossentropy(y_true_batch, y_pred_batch):
- epsilon = 1e-7
- y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
-
- losses = -np.mean(y_true_batch * np.log(y_pred_batch) + (1 - y_true_batch) * np.log(1 - y_pred_batch), axis=1)
-
- mean_loss = np.mean(losses)
- return mean_loss
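
With loss_functions.py gone, `learner` no longer reports cross-entropy at all. A reader who still wants it for monitoring can inline the deleted categorical version and apply it to the softmax predictions that `evaluate` returns (a sketch; the index into the result tuple follows the `return W, None, accuracy, None, None, softmax_preds` shown above):

```python
import numpy as np

def categorical_crossentropy(y_true_batch, y_pred_batch):
    # Same formulation as the deleted module: clip, per-sample CE, then mean.
    epsilon = 1e-7
    y_pred_batch = np.clip(y_pred_batch, epsilon, 1. - epsilon)
    return np.mean(-np.sum(y_true_batch * np.log(y_pred_batch), axis=1))

# Hypothetical usage after training:
# model = plan.evaluate(x_train, y_train, W=best_W, activation_potentiation=activations)
# print(categorical_crossentropy(y_train, model[5]))  # model[5] is softmax_preds
```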