pyerualjetwork 4.7.2b0__py3-none-any.whl → 4.8b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.7.2b0"
+ __version__ = "4.8b0"
  __update__ = """* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES
  * PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main
  * PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf
pyerualjetwork/model_operations.py CHANGED
@@ -314,10 +314,10 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=np.float32):
  W = model[get_weights()]
  model_type = model[get_model_type()]

- if isinstance(activation_potentiations, str):
-     activation_potentiations = [activation_potentiations]
- elif isinstance(activation_potentiations, list):
-     activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+ if isinstance(activation_potentiation, str):
+     activation_potentiation = [activation_potentiation]
+ elif isinstance(activation_potentiation, list):
+     activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]

  Input = standard_scaler(None, Input, scaler_params)

@@ -398,10 +398,10 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li

  Input = standard_scaler(None, Input, scaler_params)

- if isinstance(activation_potentiations, str):
-     activation_potentiations = [activation_potentiations]
- elif isinstance(activation_potentiations, list):
-     activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+ if isinstance(activation_potentiation, str):
+     activation_potentiation = [activation_potentiation]
+ elif isinstance(activation_potentiation, list):
+     activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]

  if is_mlp:

pyerualjetwork/model_operations_cuda.py CHANGED
@@ -333,10 +333,10 @@ def predict_model_ssd(Input, model_name, model_path='', dtype=cp.float32):
  W = model[get_weights()]
  model_type = model[get_model_type()]

- if isinstance(activation_potentiations, str):
-     activation_potentiations = [activation_potentiations]
- elif isinstance(activation_potentiations, list):
-     activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+ if isinstance(activation_potentiation, str):
+     activation_potentiation = [activation_potentiation]
+ elif isinstance(activation_potentiation, list):
+     activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]

  Input = standard_scaler(None, Input, scaler_params)

@@ -419,10 +419,10 @@ def predict_model_ram(Input, W, scaler_params=None, activation_potentiation=['li

  try:

-     if isinstance(activation_potentiations, str):
-         activation_potentiations = [activation_potentiations]
-     elif isinstance(activation_potentiations, list):
-         activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+     if isinstance(activation_potentiation, str):
+         activation_potentiation = [activation_potentiation]
+     elif isinstance(activation_potentiation, list):
+         activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]

      Input = standard_scaler(None, Input, scaler_params)
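The four hunks above are a consistent rename: the prediction helpers previously referenced activation_potentiations (plural) where the surrounding code uses activation_potentiation, and this release aligns the two. The normalization idiom itself also changed so that bare strings inside the list are kept intact. A minimal standalone sketch of that idiom (the helper name normalize_activations is hypothetical, for illustration only):

# Hypothetical helper mirroring the normalization idiom in the hunks above.
# It coerces the argument into a list whose items are each a str or a list,
# so both flat PLAN activation lists and per-layer MLP lists pass through.
def normalize_activations(activation_potentiation):
    if isinstance(activation_potentiation, str):
        activation_potentiation = [activation_potentiation]
    elif isinstance(activation_potentiation, list):
        activation_potentiation = [item if isinstance(item, (list, str)) else [item]
                                   for item in activation_potentiation]
    return activation_potentiation

print(normalize_activations('tanh'))            # ['tanh']
print(normalize_activations(['tanh', 'relu']))  # ['tanh', 'relu'] (strings now kept as-is)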
pyerualjetwork/plan.py CHANGED
@@ -16,6 +16,7 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  """

  import numpy as np
+ import copy

  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
@@ -68,7 +69,7 @@ def fit(
  """

  # Pre-check
-
+
  if len(x_train) != len(y_train): raise ValueError("x_train and y_train must have the same length.")

  weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W
@@ -86,12 +87,12 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False,
  neurons_history=False, early_stop=False, show_history=False, target_loss=None,
  interval=33.33, target_acc=None, loss='categorical_crossentropy', acc_impact=0.9, loss_impact=0.1,
- start_this_act=None, start_this_W=None, dtype=np.float32):
+ start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=np.float32):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
- the most accurate combination of activation potentiation for the given dataset using genetic algorithm NEAT (Neuroevolution of Augmenting Topologies). But modifided for PLAN version. Created by me: PLANEAT.
+ the most accurate combination of activation potentiation (or activation function) & weight values for the given dataset.

- Why genetic optimization and not backpropagation?
+ Why genetic optimization ENE (Eugenic NeuroEvolution) and not backpropagation?
  Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
  Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.

@@ -120,10 +121,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (100% of train)
  :param pop_size: (int, optional): Population size of each generation. Default: count of activation functions
  :param weight_evolve: (bool, optional): Activation combinations are already optimized by the PLANEAT genetic search algorithm. Should the weight parameters also evolve, or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
- :param neural_web_history: (bool, optional): Draws history of neural web. Default is False.
+ :param neural_web_history: (bool, optional): Draws history of neural web. Default is False. [ONLY FOR PLAN MODELS]
  :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
  :param auto_normalization: (bool, optional): Normalization may solve overflow problems. Default: False
- :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+ :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the ENE optimization process. True or False. Default is False. [ONLY FOR PLAN MODELS]
  :param early_stop: (bool, optional): If True, implements early stopping during training. (Stops learning if accuracy does not improve for two generations.) Default is False.
  :param show_history: (bool, optional): If True, displays the training history after optimization. Default is False.
  :param target_loss: (float, optional): The target loss to stop training early when achieved. Default is None.
@@ -134,7 +135,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param loss_impact: (float, optional): Impact of loss for optimization [0-1]. Default: 0.1
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
  :param start_this_W: (numpy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
- :param dtype: (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16.
+ :param hidden: (int, optional): If you don't want to train a PLAN model, this parameter represents the hidden layer count for an MLP model. Default: 0 (PLAN)
+ :param neurons: (list[int], optional): If you don't want to train a PLAN model, this parameter represents the neuron count of each hidden layer for an MLP. Default: None (PLAN)
+ :param activation_functions: (list[str], optional): If you don't want to train a PLAN model, this parameter represents the activation function of each hidden layer for an MLP. Default: None (PLAN) NOTE: THIS AFFECTS HIDDEN LAYER OUTPUTS, NOT THE OUTPUT LAYER!
+ :param dtype: (numpy.dtype): Data type for the weight matrices. np.float32 by default. Example: np.float64 or np.float16.

  Returns:
  tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activation functions]].
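The three new parameters switch learner from PLAN training to evolving a plain MLP. A hedged usage sketch (the dataset, the hyperparameters, and the exact optimizer wiring are illustrative assumptions, not taken from this diff):

# Hypothetical usage of the new MLP mode in pyerualjetwork 4.8b0.
# Assumption: planeat.evolver is wrapped as the optimizer callback, matching
# the arguments learner passes to it (weights, activations, generation index,
# fitness, weight_evolve=..., bar_status=False).
import numpy as np
from pyerualjetwork import plan, planeat

x_train = np.random.rand(200, 16).astype(np.float32)
y_train = np.eye(4)[np.random.randint(0, 4, 200)]   # one-hot labels, 4 classes

optimizer = lambda *args, **kwargs: planeat.evolver(*args, **kwargs)

# Per the dispatch added in this version: supplying hidden, neurons and
# activation_functions together forces fit_start=False, weight_evolve=True,
# is_mlp=True internally.
model = plan.learner(x_train, y_train, optimizer,
                     gen=5,
                     hidden=2,                               # two hidden layers -> MLP mode
                     neurons=[32, 16],                       # neurons per hidden layer
                     activation_functions=['tanh', 'tanh'])  # hidden layers only, not output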
@@ -151,8 +155,6 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  # Pre-checks

  if pop_size is None: pop_size = activation_potentiation_len
-
- x_train = x_train.astype(dtype, copy=False)
  y_train = optimize_labels(y_train, cuda=False)

  if pop_size < activation_potentiation_len: raise ValueError(f"pop_size must be higher or equal to {activation_potentiation_len}")
@@ -162,6 +164,14 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

+ if hidden is not 0 and neurons is not None and activation_functions is not None:
+     fit_start = False
+     weight_evolve = True
+     is_mlp = True
+     activation_potentiation = activation_functions
+ else:
+     is_mlp = False
+
  # Initialize visualization components
  viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

@@ -177,7 +187,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=77, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
-     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
+     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

  else:
      weight_pop = [0] * pop_size
@@ -210,8 +220,8 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,

  if weight_evolve is False:
      weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
-
- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization)
+
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
  acc = model[get_acc()]

  if loss == 'categorical_crossentropy':
@@ -229,11 +239,11 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  best_fitness = fitness
  best_acc = acc
  best_loss = train_loss
- best_weight = np.copy(weight_pop[j])
+ best_weight = np.copy(weight_pop[j]) if is_mlp is False else copy.deepcopy(weight_pop[j])
  best_model = model

  final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
- final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+ if is_mlp is False: final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

  if batch_size == 1:
      postfix_dict[f"{data} Accuracy"] = np.round(best_acc, 4)
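For PLAN genomes, weight_pop[j] is a single 2-D array, so np.copy takes a full snapshot. In MLP mode each genome is a collection of per-layer matrices, and a shallow copy would leave the saved "best" weights aliasing arrays that keep mutating. A sketch of the pitfall (the ragged two-layer genome is illustrative):

# Why copy.deepcopy is used for MLP genomes: np.copy of an object container
# copies the container, not the per-layer matrices inside it.
import copy
import numpy as np

genome = [np.ones((3, 2)), np.ones((4, 3))]           # ragged per-layer weights

shallow = np.copy(np.array(genome, dtype=object))     # elements still shared
deep = copy.deepcopy(genome)                          # fully independent copy

genome[0][0, 0] = 99.0
print(shallow[0][0, 0])   # 99.0 -> aliased, the "best" snapshot drifted
print(deep[0][0, 0])      # 1.0  -> deepcopy preserved the snapshot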
@@ -268,7 +278,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and best_acc >= target_acc:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                y_pred_batch=train_model[get_preds_softmax()])
@@ -288,7 +298,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_loss is not None and best_loss <= target_loss:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -310,7 +320,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  progress.update(1)

  if batch_size != 1:
-     train_model = evaluate(x_train, y_train, best_weight, final_activations)
+     train_model = evaluate(x_train, y_train, best_weight, final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -330,7 +340,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  best_acc_per_gen_list.append(best_acc)
  loss_list.append(best_loss)

- weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, bar_status=False)
+ if is_mlp is False: weight_pop = np.array(weight_pop, copy=False, dtype=dtype)
+ else: weight_pop = np.array(weight_pop, copy=False, dtype=object)
+
+ weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, bar_status=False)
  target_pop = []

  # Early stopping check
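The split above exists because an MLP population is ragged: each genome is a set of per-layer matrices with different shapes, which cannot be packed into a single float array. dtype=object keeps NumPy from trying. A sketch under that assumption:

# Packing a ragged MLP weight population for the optimizer callback.
import numpy as np

genome_a = [np.zeros((8, 4)), np.zeros((3, 8))]   # hidden layer + output layer
genome_b = [np.zeros((8, 4)), np.zeros((3, 8))]
weight_pop = [genome_a, genome_b]

packed = np.array(weight_pop, copy=False, dtype=object)
print(packed.shape)        # (2, 2): genomes x layers, elements are ndarrays
print(packed[0, 1].shape)  # (3, 8): per-layer shapes survive intact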
@@ -338,7 +351,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -358,8 +371,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,

  # Final evaluation
  progress.close()
+
  train_model = evaluate(x_train, y_train, W=best_weight,
-                        activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                        activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -382,7 +396,8 @@ def evaluate(
  y_test,
  W,
  activation_potentiation=['linear'],
- auto_normalization=False
+ auto_normalization=False,
+ is_mlp=False
  ) -> tuple:
  """
  Evaluates the neural network model using the given test data.
@@ -398,6 +413,8 @@ def evaluate(

  auto_normalization (bool, optional): Normalization for x_test ? Default = False.

+ is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
  Returns:
  tuple: Model (list).
  """
@@ -406,12 +423,22 @@ def evaluate(

  if isinstance(activation_potentiation, str):
      activation_potentiation = [activation_potentiation]
+ elif isinstance(activation_potentiation, list):
+     activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+
+ if is_mlp:
+     layer = x_test
+     for i in range(len(W)):
+         if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
+
+         layer = layer @ W[i].T
+
+     result = layer
+
  else:
-     activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]

-     x_test = apply_activation(x_test, activation_potentiation)
-
-     result = x_test @ W.T
+     x_test = apply_activation(x_test, activation_potentiation)
+     result = x_test @ W.T

  max_vals = np.max(result, axis=1, keepdims=True)
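The new is_mlp branch is an ordinary feed-forward pass: W holds one matrix per layer, activation_potentiation[i] is applied to the running layer before the i-th matmul, and neither the first nor the last matmul gets a preceding activation (matching the "hidden layers only" note in the learner doc-string). A standalone sketch, with apply_activation approximated by a simple name-to-function table (an assumption, not the library's implementation):

# Standalone mirror of the MLP forward pass added to evaluate().
import numpy as np

ACTS = {'linear': lambda x: x, 'relu': lambda x: np.maximum(x, 0), 'tanh': np.tanh}

def mlp_forward(x, W, activation_potentiation):
    layer = x
    for i in range(len(W)):
        # As in the diff: skip activation before the first matmul (i == 0)
        # and before the output-layer matmul (i == len(W) - 1).
        if i != len(W) - 1 and i != 0:
            layer = ACTS[activation_potentiation[i]](layer)
        layer = layer @ W[i].T
    return layer

x = np.random.rand(5, 16).astype(np.float32)
W = [np.random.randn(32, 16), np.random.randn(8, 32), np.random.randn(4, 8)]
print(mlp_forward(x, W, ['linear', 'tanh', 'linear']).shape)  # (5, 4) raw scores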
pyerualjetwork/plan_cuda.py CHANGED
@@ -16,6 +16,8 @@ PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welco
  """

  import cupy as cp
+ import numpy as np
+ import copy

  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
@@ -80,12 +82,12 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  weight_evolve=True, neural_web_history=False, show_current_activations=False, auto_normalization=False, target_acc=None,
  neurons_history=False, early_stop=False, show_history=False, loss='categorical_crossentropy',
  interval=33.33, target_loss=None, loss_impact=0.1, acc_impact=0.9,
- start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
+ start_this_act=None, start_this_W=None, neurons=None, hidden=0, activation_functions=None, dtype=cp.float32, memory='gpu'):
  """
  Optimizes the activation functions for a neural network by leveraging train data to find
- the most accurate combination of activation potentiation for the given dataset.
+ the most accurate combination of activation potentiation (or activation function) & weight values for the given dataset.

- Why genetic optimization and not backpropagation?
+ Why genetic optimization ENE (Eugenic NeuroEvolution) and not backpropagation?
  Because PLAN is different from other neural network architectures. In PLAN, the learnable parameters are not the weights; instead, the learnable parameters are the activation functions.
  Since activation functions are not differentiable, we cannot use gradient descent or backpropagation. However, I developed a more powerful genetic optimization algorithm: PLANEAT.

@@ -114,11 +116,11 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param batch_size: (float, optional): Batch size is used in the prediction process to receive train feedback by dividing the train data into chunks and selecting activations based on randomly chosen partitions. This process reduces computational cost and time while still covering the entire train set due to random selection, so it doesn't significantly impact accuracy. For example, a batch size of 0.08 means each train batch represents 8% of the train set. Default is 1. (100% of train)
  :param pop_size: (int, optional): Population size of each generation. Default: count of activation functions
  :param weight_evolve: (bool, optional): Activation combinations are already optimized by the PLANEAT genetic search algorithm. Should the weight parameters also evolve, or should the weights be determined according to the aggregating learning principle of the PLAN algorithm? Default: True (Evolves Weights)
- :param neural_web_history: (bool, optional): Draws history of neural web. Default is False.
+ :param neural_web_history: (bool, optional): Draws history of neural web. Default is False. [ONLY FOR PLAN MODELS]
  :param show_current_activations: (bool, optional): Should it display the activations selected according to the current strategies during learning, or not? (True or False) This can be very useful if you want to cancel the learning process and resume from where you left off later. After canceling, you will need to view the live training activations in order to choose the activations to be given to the 'start_this' parameter. Default is False
  :param auto_normalization: (bool, optional): Normalization may solve overflow problems. Default: False
  :param target_acc: (float, optional): The target accuracy to stop training early when achieved. Default is None.
- :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the TFL (Test or Train Feedback Learning) stages. True or False. Default is False.
+ :param neurons_history: (bool, optional): Shows the history of changes that neurons undergo during the ENE process. True or False. Default is False. [ONLY FOR PLAN MODELS]
  :param early_stop: (bool, optional): If True, implements early stopping during training. (Stops learning if train accuracy does not improve for two generations.) Default is False.
  :param show_history: (bool, optional): If True, displays the training history after optimization. Default is False.
  :param loss: (str, optional): For visualizing and monitoring. PLAN neural networks don't need any loss function in training. Options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.
@@ -128,7 +130,10 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  :param acc_impact: (float, optional): Impact of accuracy for optimization [0-1]. Default: 0.9
  :param start_this_act: (list, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the list of activation functions selected up to the learned portion to this parameter. Default is None
  :param start_this_W: (cupy.array, optional): To resume a previously canceled or interrupted training from where it left off, or to continue from that point with a different strategy, provide the weight matrix of this genome. Default is None
- :param dtype: (cupy.dtype): Data type for the arrays. np.float32 by default. Example: cp.float64 or cp.float16.
+ :param hidden: (int, optional): If you don't want to train a PLAN model, this parameter represents the hidden layer count for an MLP model. Default: 0 (PLAN)
+ :param neurons: (list[int], optional): If you don't want to train a PLAN model, this parameter represents the neuron count of each hidden layer for an MLP. Default: None (PLAN)
+ :param activation_functions: (list[str], optional): If you don't want to train a PLAN model, this parameter represents the activation function of each hidden layer for an MLP. Default: None (PLAN) NOTE: THIS AFFECTS HIDDEN LAYER OUTPUTS, NOT THE OUTPUT LAYER!
+ :param dtype: (cupy.dtype): Data type for the weight matrices. cp.float32 by default. Example: cp.float64 or cp.float16.
  :param memory: (str): The memory parameter determines whether the dataset to be processed on the GPU will be stored in the CPU's RAM or the GPU's RAM. Options: 'gpu', 'cpu'. Default: 'gpu'.

  Returns:
@@ -151,13 +156,13 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if gen is None: gen = activation_potentiation_len

  if memory == 'gpu':
-     x_train = transfer_to_gpu(x_train, dtype=dtype)
+     x_train = transfer_to_gpu(x_train, dtype=x_train.dtype)
      y_train = transfer_to_gpu(y_train, dtype=y_train.dtype)

      from .data_operations_cuda import batcher

  elif memory == 'cpu':
-     x_train = transfer_to_cpu(x_train, dtype=dtype)
+     x_train = transfer_to_cpu(x_train, dtype=x_train.dtype)
      y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)

      from .data_operations import batcher
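The two transfer changes stop coercing the training data into the weight dtype: x_train now keeps whatever dtype it arrived with (mirroring the y_train handling on the next line), and dtype only governs the weight matrices, as the revised doc-string states. An illustrative sketch, approximating transfer_to_gpu with cupy.asarray (an assumption about its behavior, not the library's code):

# Before: data silently downcast to the weight dtype. After: dtype preserved.
import numpy as np
import cupy as cp

x_train = np.random.rand(64, 10)          # arrives as float64
weight_dtype = cp.float32                 # the dtype parameter, weights only

x_old = cp.asarray(x_train, dtype=weight_dtype)    # old behavior: float32
x_new = cp.asarray(x_train, dtype=x_train.dtype)   # new behavior: float64
print(x_old.dtype, x_new.dtype)                    # float32 float64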
@@ -168,6 +173,14 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
  if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

+ if hidden is not 0 and neurons is not None and activation_functions is not None:
+     fit_start = False
+     weight_evolve = True
+     is_mlp = True
+     activation_potentiation = activation_functions
+ else:
+     is_mlp = False
+
  # Initialize visualization components
  viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

@@ -183,7 +196,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=79, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
-     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
+     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), neurons=neurons, hidden=hidden, activation_functions=activation_functions, population_size=pop_size, dtype=dtype)

  else:
      weight_pop = [0] * pop_size
@@ -206,7 +219,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,

  x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)

- x_train_batch = cp.array(x_train_batch, dtype=dtype, copy=False)
+ x_train_batch = cp.array(x_train_batch, dtype=x_train_batch.dtype, copy=False)
  y_train_batch = cp.array(y_train_batch, dtype=y_train.dtype)

  if fit_start is True and i == 0 and j < activation_potentiation_len:
@@ -220,7 +233,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if weight_evolve is False:
      weight_pop[j] = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)

- model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization)
+ model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j], auto_normalization=auto_normalization, is_mlp=is_mlp)
  acc = model[get_acc()]

  if loss == 'categorical_crossentropy':
@@ -232,18 +245,18 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,

  fitness = wals(acc, train_loss, acc_impact, loss_impact)
- target_pop.append(fitness)
+ target_pop.append(fitness.get())

  if fitness >= best_fitness:

      best_fitness = fitness
      best_acc = acc
      best_loss = train_loss
-     best_weight = cp.copy(weight_pop[j])
+     best_weight = cp.copy(weight_pop[j]) if is_mlp is False else copy.deepcopy(weight_pop[j])
      best_model = model

      final_activations = act_pop[j].copy() if isinstance(act_pop[j], list) else act_pop[j]
-     final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same
+     if is_mlp is False: final_activations = [final_activations[0]] if len(set(final_activations)) == 1 else final_activations # removing if all same

  if batch_size == 1:
      postfix_dict[f"{data} Accuracy"] = cp.round(best_acc, 4)
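target_pop.append(fitness.get()) matters because wals runs on device data here, so fitness is a 0-d CuPy array; the optimizer hunk below packs target_pop with np.array, and NumPy refuses implicit conversion of CuPy arrays. .get() copies the scalar to the host first. A sketch (wals is approximated as a weighted accuracy/loss score, which is an assumption):

# Device scalar -> host scalar before building a NumPy fitness vector.
import numpy as np
import cupy as cp

acc, train_loss = cp.asarray(0.91), cp.asarray(0.23)
fitness = acc * 0.9 - train_loss * 0.1     # 0-d CuPy array, lives on the GPU

target_pop = [fitness.get()]               # host copy (0-d NumPy array)
print(np.array(target_pop))                # fine; np.array([fitness]) would raise TypeError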
@@ -278,7 +291,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_acc is not None and best_acc >= target_acc:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)
      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
                                                y_pred_batch=train_model[get_preds_softmax()])
@@ -298,7 +311,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if target_loss is not None and best_loss <= target_loss:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -320,7 +333,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  progress.update(1)

  if batch_size != 1:
-     train_model = evaluate(x_train, y_train, best_weight, final_activations)
+     train_model = evaluate(x_train, y_train, best_weight, final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -340,7 +353,9 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  best_acc_per_gen_list.append(best_acc)
  loss_list.append(best_loss)

- weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), weight_evolve=weight_evolve, bar_status=False)
+ if is_mlp is False: weight_pop = cp.array(weight_pop, copy=False, dtype=dtype)
+
+ weight_pop, act_pop = optimizer(weight_pop, act_pop, i, np.array(target_pop), weight_evolve=weight_evolve, bar_status=False)
  target_pop = []

  # Early stopping check
@@ -348,7 +363,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  if best_acc_per_gen_list[i] == best_acc_per_gen_list[i-1]:
      progress.close()
      train_model = evaluate(x_train, y_train, W=best_weight,
-                            activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                            activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

      if loss == 'categorical_crossentropy':
          train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -369,7 +384,7 @@ def learner(x_train, y_train, optimizer, fit_start=True, gen=None, batch_size=1,
  # Final evaluation
  progress.close()
  train_model = evaluate(x_train, y_train, W=best_weight,
-                        activation_potentiation=final_activations, auto_normalization=auto_normalization)
+                        activation_potentiation=final_activations, auto_normalization=auto_normalization, is_mlp=is_mlp)

  if loss == 'categorical_crossentropy':
      train_loss = categorical_crossentropy(y_true_batch=y_train,
@@ -391,7 +406,8 @@ def evaluate(
  y_test,
  W,
  activation_potentiation=['linear'],
- auto_normalization=False
+ auto_normalization=False,
+ is_mlp=False
  ) -> tuple:
  """
  Evaluates the neural network model using the given test data.
@@ -407,6 +423,8 @@ def evaluate(

  auto_normalization (bool, optional): Normalization for x_test ? Default = False.

+ is_mlp (bool, optional): Evaluate PLAN model or MLP model ? Default: False (PLAN)
+
  Returns:
  tuple: Model (list).
  """
@@ -415,12 +433,22 @@ def evaluate(

  if isinstance(activation_potentiation, str):
      activation_potentiation = [activation_potentiation]
+ elif isinstance(activation_potentiation, list):
+     activation_potentiation = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiation]
+
+ if is_mlp:
+     layer = x_test
+     for i in range(len(W)):
+         if i != len(W) - 1 and i != 0: layer = apply_activation(layer, activation_potentiation[i])
+
+         layer = layer @ W[i].T
+
+     result = layer
+
  else:
-     activation_potentiation = [item if isinstance(item, list) else [item] for item in activation_potentiation]

-     x_test = apply_activation(x_test, activation_potentiation)
-
-     result = x_test @ W.T
+     x_test = apply_activation(x_test, activation_potentiation)
+     result = x_test @ W.T

  max_vals = cp.max(result, axis=1, keepdims=True)
pyerualjetwork/planeat.py CHANGED
@@ -15,6 +15,7 @@ PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_A
  import numpy as np
  import random
  import math
+ import copy

  ### LIBRARY IMPORTS ###
  from .data_operations import normalization, non_neg_normalization
@@ -134,6 +135,7 @@ def evolver(weights,
  weight_mutate_threshold=16,
  weight_mutate_prob=1,
  is_mlp=False,
+ save_best_genome=False,
  dtype=np.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -222,6 +224,8 @@ def evolver(weights,

  is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

+ save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default: False
+
  dtype (numpy.dtype, optional): Data type for the arrays. Default: np.float32.
  Example: np.float64 or np.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].

@@ -316,6 +320,8 @@ def evolver(weights,

  ### FITNESS IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

+ previous_best_genome_index = np.argmax(fitness)
+
  sort_indices = np.argsort(fitness)

  fitness = fitness[sort_indices]
@@ -327,7 +333,7 @@ def evolver(weights,

  good_weights = weights[slice_center:]
  bad_weights = weights[:slice_center]
- best_weight = np.copy(good_weights[-1])
+ best_weight = np.copy(good_weights[-1]) if is_mlp is False else copy.deepcopy(good_weights[-1])

  good_activations = list(activation_potentiations[slice_center:])
  bad_activations = list(activation_potentiations[:slice_center])
@@ -354,7 +360,7 @@ def evolver(weights,
  for i in range(len(bad_weights)):

      if policy == 'aggressive':
-         first_parent_W = best_weight
+         first_parent_W = np.copy(best_weight)
          first_parent_act = best_activations
          first_parent_fitness = best_fitness

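Under the 'aggressive' policy every pairing reuses the elite as the first parent. Previously first_parent_W was a bare reference to best_weight, so any in-place write during crossover would have corrupted the elite for all later pairings in the same generation; np.copy gives each pairing a private buffer. A sketch of the aliasing pitfall (cross_over_inplace is a hypothetical stand-in for an in-place edit inside the real cross_over):

# Aliasing vs. copying the elite parent.
import numpy as np

def cross_over_inplace(first_parent_W):
    first_parent_W[0, 0] = -1.0   # hypothetical in-place edit during crossover
    return first_parent_W

best_weight = np.ones((2, 2))
cross_over_inplace(best_weight)           # old: the elite itself is mutated
print(best_weight[0, 0])                  # -1.0

best_weight = np.ones((2, 2))
cross_over_inplace(np.copy(best_weight))  # new: the edit lands on a copy
print(best_weight[0, 0])                  # 1.0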
@@ -457,9 +463,6 @@ def evolver(weights,
  )

  if bar_status: progress.update(1)
-
- child_W[0] = best_weight
- child_act[0] = best_activations

  if is_mlp:
      weights = np.vstack((child_W, mutated_W), dtype=object)
@@ -468,7 +471,10 @@ def evolver(weights,

  activation_potentiations = child_act + mutated_act

-
+ if save_best_genome:
+     weights[0] = best_weight
+     activation_potentiations[0] = best_activations
+
  ### INFO PRINTING CONSOLE

  if show_info == True:
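Elitism is now opt-in: the unconditional child_W[0] = best_weight carry-over was removed, and the previous generation's best genome is written into slot 0 only when save_best_genome=True, after crossover and mutation have produced the new population. A minimal sketch of the same logic in isolation (function and variable names here are illustrative, not the library's API):

# Opt-in elitism over an assembled next generation.
import numpy as np

def apply_elitism(weights, activation_potentiations, best_weight,
                  best_activations, save_best_genome=False):
    if save_best_genome:
        weights[0] = best_weight                        # elite survives unchanged
        activation_potentiations[0] = best_activations
    return weights, activation_potentiations

pop_W = [np.zeros((2, 2)) for _ in range(4)]            # next generation (sketch)
pop_act = [['relu'], ['tanh'], ['linear'], ['relu']]
pop_W, pop_act = apply_elitism(pop_W, pop_act, np.ones((2, 2)), ['tanh'],
                               save_best_genome=True)
print(pop_W[0][0, 0], pop_act[0])                       # 1.0 ['tanh']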
@@ -492,6 +498,7 @@ def evolver(weights,
  print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
  print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
  print(" FITNESS BIAS: ", str(fitness_bias) + '\n')
+ print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')

  print("*** Performance ***")
@@ -499,9 +506,8 @@ def evolver(weights,
  print(" MEAN FITNESS: ", str(round(np.mean(fitness), 2)))
  print(" MIN FITNESS: ", str(round(min(fitness), 2)) + '\n')

- print(" BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
- print(" BEST GENOME INDEX: ", str(0))
- print(" NOTE: The returned genome at the first index is the best of the previous generation." + '\n')
+ print(" PREVIOUS BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
+ print(" PREVIOUS BEST GENOME INDEX: ", str(previous_best_genome_index))

  if weight_evolve is False: weights = origin_weights

@@ -536,17 +542,18 @@ def evaluate(Input, weights, activation_potentiations, is_mlp=False):
  the output for each genome in population.
  """
  ### THE OUTPUTS ARE RETURNED, WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:
-
-
+
  if isinstance(activation_potentiations, str):
      activation_potentiations = [activation_potentiations]
  elif isinstance(activation_potentiations, list):
      activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]

+
  if is_mlp:
      layer = Input
      for i in range(len(weights)):
          if i != len(weights) - 1 and i != 0: layer = apply_activation(layer, activation_potentiations[i])
+
          layer = layer @ weights[i].T

      return layer
@@ -733,7 +740,7 @@ def cross_over(first_parent_W,

  threshold = abs(activation_selection_threshold / succes)
  new_threshold = threshold
-
+
  while True:

      random_index_undominant = int(random.uniform(0, len(undominant_parent_act)-1))
pyerualjetwork/planeat_cuda.py CHANGED
@@ -16,7 +16,7 @@ import cupy as cp
  import numpy as np
  import random
  import math
-
+ import copy

  ### LIBRARY IMPORTS ###
  from .data_operations_cuda import normalization, non_neg_normalization
@@ -135,6 +135,7 @@ def evolver(weights,
  weight_mutate_threshold=16,
  weight_mutate_prob=1,
  is_mlp=False,
+ save_best_genome=False,
  dtype=cp.float32):
  """
  Applies the evolving process of a population of genomes using selection, crossover, mutation, and activation function potentiation.
@@ -223,6 +224,8 @@ def evolver(weights,

  is_mlp (bool, optional): Evolve PLAN model or MLP model ? Default: False (PLAN)

+ save_best_genome (bool, optional): Save the best genome of the previous generation to the next generation. Default: False
+
  dtype (cupy.dtype): Data type for the arrays. Default: cp.float32.
  Example: cp.float64 or cp.float16 [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not recommended!].

@@ -307,7 +310,7 @@ def evolver(weights,
  else:
      raise ValueError("genome population size must be even number. for example: not 99, make 100 or 98.")

- if weight_evolve is False: origin_weights = cp.copy(weights)
+ if weight_evolve is False: origin_weights = cp.copy(weights) if is_mlp else copy.deepcopy(weights)

  if is_mlp:
      activation_mutate_add_prob = 0
@@ -326,6 +329,8 @@ def evolver(weights,

  ### FITNESS LIST IS SORTED IN ASCENDING ORDER, AND THE WEIGHT AND ACTIVATIONS OF EACH GENOME ARE SORTED ACCORDING TO THIS ORDER:

+ previous_best_genome_index = np.argmax(fitness)
+
  sort_indices = cp.argsort(fitness)

  fitness = fitness[sort_indices]
@@ -471,9 +476,6 @@ def evolver(weights,

  if bar_status: progress.update(1)

- child_W[0] = best_weight
- child_act[0] = best_activations
-
  if is_mlp:
      for i in range(len(child_W)):
          for j in range(len(child_W[i])):
@@ -487,6 +489,10 @@ def evolver(weights,

  activation_potentiations = child_act + mutated_act

+ if save_best_genome:
+     weights[0] = best_weight
+     activation_potentiations[0] = best_activations
+
  ### INFO PRINTING CONSOLE

  if show_info == True:
@@ -510,6 +516,7 @@ def evolver(weights,
  print(" ACTIVATION SELECTION ADD PROB: ", str(activation_selection_add_prob))
  print(" ACTIVATION SELECTION CHANGE PROB: ", str(activation_selection_change_prob))
  print(" FITNESS BIAS: ", str(fitness_bias) + '\n')
+ print(" SAVE BEST GENOME: ", str(save_best_genome) + '\n')

  print("*** Performance ***")
  print(" MAX FITNESS: ", str(cp.round(max(fitness), 2)))
@@ -517,8 +524,8 @@ def evolver(weights,
  print(" MIN FITNESS: ", str(cp.round(min(fitness), 2)) + '\n')

  print(" BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
- print(" BEST GENOME INDEX: ", str(0))
- print(" NOTE: The returned genome at the first index is the best of the previous generation." + '\n')
+ print(" PREVIOUS BEST GENOME ACTIVATION LENGTH: ", str(len(best_activations)))
+ print(" PREVIOUS BEST GENOME INDEX: ", str(previous_best_genome_index))

  if weight_evolve is False: weights = origin_weights

@@ -557,9 +564,9 @@ def evaluate(Input, weights, activation_potentiations, is_mlp=False):
  ### THE OUTPUTS ARE RETURNED WHERE EACH GENOME'S OUTPUT MATCHES ITS INDEX:

  if isinstance(activation_potentiations, str):
-     activation_potentiations = [activation_potentiations]
- elif isinstance(activation_potentiations, list):
-     activation_potentiations = [item if isinstance(item, list) or isinstance(item, str) else [item] for item in activation_potentiations]
+     activation_potentiations = [activation_potentiations]
+ else:
+     activation_potentiations = [item if isinstance(item, list) else [item] for item in activation_potentiations]

  if is_mlp:

pyerualjetwork-4.7.2b0.dist-info/METADATA → pyerualjetwork-4.8b0.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.7.2b0
+ Version: 4.8b0
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com

pyerualjetwork-4.7.2b0.dist-info/RECORD → pyerualjetwork-4.8b0.dist-info/RECORD
@@ -1,4 +1,4 @@
- pyerualjetwork/__init__.py,sha256=3BBKyB4jthRbtBBj23FCBSgD3qGezCahzQR69jwVBmU,1281
+ pyerualjetwork/__init__.py,sha256=JrdL2kDhMrEE3ES_VNRY5lgT7PDsqJDgb0AV0wq_roE,1279
  pyerualjetwork/activation_functions.py,sha256=X7Kv8qv8oZq8hvTdUiV-GkFjKHRlKIQypRPXh6gdkm4,7614
  pyerualjetwork/activation_functions_cuda.py,sha256=pefklsl9QuSVbKwiUUHeF_ExN0bICH7QIF1MfoMU40Q,7665
  pyerualjetwork/data_operations.py,sha256=XKYG9-mLa3qKAXUjejuD7V8aJKjpl5PdQwKzPFjpKgs,15437
@@ -10,16 +10,16 @@ pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqd
  pyerualjetwork/memory_operations.py,sha256=0yCOHcgiNyF4ccMcRlL1Q9F_byG4nzjhmkbpXE_yU6E,13401
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
- pyerualjetwork/model_operations.py,sha256=qdOpvlqeK4BJwcQopPj8Ka3WqpARY6G_rm-oQLuKZ9Y,15280
- pyerualjetwork/model_operations_cuda.py,sha256=9MZI7X2pudkm4QudrEJH6q3OGHNEAmdfRaOrF2uoZ60,16341
- pyerualjetwork/plan.py,sha256=cjVblo8TxTHX-GZPvgQvJZ34nOmzxSvtCrQi9K-Mhog,23268
- pyerualjetwork/plan_cuda.py,sha256=ZEU_b_EoA-zPk7Gn94L_XBZz1v4mn8DOUsjTNV6fp8Q,24230
- pyerualjetwork/planeat.py,sha256=_AkG-EJj8M7M1fCoXLQpttXud34X6bbaFkaD117UmAc,44717
- pyerualjetwork/planeat_cuda.py,sha256=YEpqxwhFYxHBxZNbDFXlI4d-r2S46OalLmKhk86pFBI,45311
+ pyerualjetwork/model_operations.py,sha256=kTLhHlu8NSFBTTWDVyMdN7PsDk2emFU9_6JgH6_Mh8k,15268
+ pyerualjetwork/model_operations_cuda.py,sha256=bK1SKi_ar7UVL7jwDftSyg4g5wMLsYT1z_ZPqFAsrKo,16329
+ pyerualjetwork/plan.py,sha256=0vMiadu-zLuQJMAPrtvIRo9K2N7sIwxQi0SqfVK9aU8,24907
+ pyerualjetwork/plan_cuda.py,sha256=9AAaagyjZWo9KZDOnQ5oRVUCq_fu1enWV0lxsD1Iy0E,26013
+ pyerualjetwork/planeat.py,sha256=Fi7J_Jwjugmg-pUTooEojFYOYWsLxO2A7tBXts2nbbE,45060
+ pyerualjetwork/planeat_cuda.py,sha256=efyu627hxTiTRZzIwvlk-z3tqDuEDsnfJB1dlEHYWs0,45634
  pyerualjetwork/ui.py,sha256=JBTFYz5R24XwNKhA3GSW-oYAoiIBxAE3kFGXkvm5gqw,656
  pyerualjetwork/visualizations.py,sha256=utnX9zQhzmtvBJLOLNGm2jecVVk4zHXABQdjb0XzJac,28352
  pyerualjetwork/visualizations_cuda.py,sha256=gnoaaazZ-nc9E1ImqXrZBRgQ4Rnpi2qh2yGJ2eLKMlE,28807
- pyerualjetwork-4.7.2b0.dist-info/METADATA,sha256=h5zXs3STHDR8KQ6dV3VP988NeDHxKwKi_GiDHnYqn-U,7507
- pyerualjetwork-4.7.2b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.7.2b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.7.2b0.dist-info/RECORD,,
+ pyerualjetwork-4.8b0.dist-info/METADATA,sha256=K1XONEVkRVZrfWyuW2jZndprD2lkfHQ2NKdFP-8CmD0,7505
+ pyerualjetwork-4.8b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.8b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.8b0.dist-info/RECORD,,