pyerualjetwork-4.3.9b1-py3-none-any.whl → pyerualjetwork-4.3.9b3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.3.9b1"
+ __version__ = "4.3.9b3"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork/plan.py CHANGED
@@ -44,7 +44,7 @@ def fit(
  y_train,
  activation_potentiation=['linear'],
  W=None,
- normalization=False,
+ auto_normalization=False,
  dtype=np.float32
  ):
  """
@@ -60,7 +60,7 @@ def fit(

  W (numpy.ndarray): If you want to re-continue or update model

- normalization (bool, optional): Normalization may solves overflow problem. Default: False
+ auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

  dtype (numpy.dtype): Data type for the arrays. np.float32 by default. Example: np.float64 or np.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

@@ -74,8 +74,8 @@ def fit(

  weight = np.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

- if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
- elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+ elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
  else: raise ValueError('normalization parameter only be True or False')

  weight += y_train.T @ x_train
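The rename from `normalization` to `auto_normalization` is more than cosmetic: in 4.3.9b1 the boolean parameter shadowed the library's `normalization` helper function inside `fit`, so passing `normalization=True` made the first branch call the bool itself. A minimal sketch of the collision, using a stand-in helper body (the real helper's module and implementation are not shown in this diff):

```python
import numpy as np

def normalization(x):
    # Stand-in for the library's normalization helper (exact body not in this diff).
    return x / np.max(np.abs(x))

# 4.3.9b1: the bool parameter shadows the module-level helper inside the function.
def fit_old(x_train, normalization=False):
    if normalization is True:
        return normalization(x_train)  # TypeError: 'bool' object is not callable
    return x_train

# 4.3.9b3: the renamed parameter leaves the helper reachable.
def fit_new(x_train, auto_normalization=False):
    if auto_normalization is True:
        return normalization(x_train)  # resolves to the helper, as intended
    return x_train

print(fit_new(np.array([2.0, 4.0]), auto_normalization=True))  # [0.5 1. ]
```

The identical rename is applied to the CUDA implementation in plan_cuda.py below.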
@@ -84,7 +84,7 @@ def fit(


  def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
- neural_web_history=False, show_current_activations=False, normalization=False,
+ neural_web_history=False, show_current_activations=False, auto_normalization=False,
  neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
  interval=33.33, target_acc=None, target_loss=None,
  start_this_act=None, start_this_W=None, dtype=np.float32):
@@ -136,7 +136,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  show_history (bool, optional): If True, displays the training history after optimization. Default is False.

- normalization (bool, optional): Normalization may solves overflow problem. Default: False
+ auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

  loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

@@ -188,12 +188,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  # Initialize visualization components
  viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

- # Initialize progress bar
- if batch_size == 1:
- ncols = 78
- else:
- ncols = 49
-
  # Initialize variables
  best_acc = 0
  best_f1 = 0
@@ -204,7 +198,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  loss_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
  weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
@@ -231,7 +225,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  if fit_start is True and i == 0 and j < activation_potentiation_len:
  act_pop[j] = activation_potentiation[j]
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
+ W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
@@ -357,9 +351,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  progress.update(1)

- best_acc_per_gen_list.append(best_acc)
- loss_list.append(best_loss)
-
  if batch_size != 1:
  train_model = evaluate(x_train, y_train, best_weights, final_activations)

@@ -373,6 +364,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  postfix_dict[f"{data} Accuracy"] = np.round(train_model[get_acc()], 4)
  postfix_dict[f"{data} Loss"] = np.round(train_loss, 4)
  progress.set_postfix(postfix_dict)
+
+ best_acc_per_gen_list.append(train_model[get_acc()])
+ loss_list.append(train_loss)
+
+ else:
+ best_acc_per_gen_list.append(best_acc)
+ loss_list.append(best_loss)

  weight_pop, act_pop = optimizer(np.array(weight_pop, copy=False, dtype=dtype), act_pop, i, np.array(target_pop, dtype=dtype, copy=False), bar_status=False)
  target_pop = []
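Together with the two deletions in the previous hunk, this changes what the per-generation history records: 4.3.9b1 appended the best mini-batch metrics unconditionally, while 4.3.9b3 logs the full-dataset re-evaluation when `batch_size != 1` and falls back to the batch metrics only for full-batch runs. A schematic sketch of the new bookkeeping (the function name and the `full_*` variables are illustrative stand-ins, not library API):

```python
def record_generation(batch_size, best_acc, best_loss, full_acc, full_loss,
                      best_acc_per_gen_list, loss_list):
    # full_acc/full_loss stand for the metrics of the best genome re-evaluated
    # on the whole training set, as evaluate() does in the hunk above.
    if batch_size != 1:
        # Mini-batch training: record full-dataset metrics so history
        # entries are comparable across generations.
        best_acc_per_gen_list.append(full_acc)
        loss_list.append(full_loss)
    else:
        # Full-batch training: the batch metrics already cover all the data.
        best_acc_per_gen_list.append(best_acc)
        loss_list.append(best_loss)
```

The plan_cuda.py hunks below apply the same change to the GPU path.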
pyerualjetwork/plan_cuda.py CHANGED
@@ -44,7 +44,7 @@ def fit(
  y_train,
  activation_potentiation=['linear'],
  W=None,
- normalization=False,
+ auto_normalization=False,
  dtype=cp.float32
  ):
  """
@@ -60,7 +60,7 @@ def fit(

  W (cupy.ndarray, optional): If you want to re-continue or update model

- normalization (bool, optional): Normalization may solves overflow problem. Default: False
+ auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

  dtype (cupy.dtype, optional): Data type for the arrays. cp.float32 by default. Example: cp.float64 or cp.float16. [fp32 for balanced devices, fp64 for strong devices, fp16 for weak devices: not reccomended!] (optional)

@@ -73,8 +73,8 @@ def fit(

  weight = cp.zeros((len(y_train[0]), len(x_train[0].ravel()))).astype(dtype, copy=False) if W is None else W

- if normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
- elif normalization is False: x_train = apply_activation(x_train, activation_potentiation)
+ if auto_normalization is True: x_train = normalization(apply_activation(x_train, activation_potentiation))
+ elif auto_normalization is False: x_train = apply_activation(x_train, activation_potentiation)
  else: raise ValueError('normalization parameter only be True or False')

  weight += y_train.T @ x_train
@@ -83,7 +83,7 @@ def fit(


  def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1, pop_size=None,
- neural_web_history=False, show_current_activations=False, normalization=False,
+ neural_web_history=False, show_current_activations=False, auto_normalization=False,
  neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
  interval=33.33, target_acc=None, target_loss=None,
  start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
@@ -136,7 +136,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  loss (str, optional): For visualizing and monitoring. PLAN neural networks doesn't need any loss function in training. options: ('categorical_crossentropy' or 'binary_crossentropy') Default is 'categorical_crossentropy'.

- normalization (bool, optional): Normalization may solves overflow problem. Default: False
+ auto_normalization (bool, optional): Normalization may solves overflow problem. Default: False

  interval (int, optional): The interval at which evaluations are conducted during training. (33.33 = 30 FPS, 16.67 = 60 FPS) Default is 100.

@@ -196,12 +196,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  # Initialize visualization components
  viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

- # Initialize progress bar
- if batch_size == 1:
- ncols = 76
- else:
- ncols = 49
-
  # Initialize variables
  best_acc = 0
  best_f1 = 0
@@ -212,7 +206,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  loss_list = []
  target_pop = []

- progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)
+ progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=78, bar_format=bar_format_learner)

  if fit_start is False or pop_size > activation_potentiation_len:
  weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=pop_size, dtype=dtype)
@@ -242,7 +236,7 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non

  if fit_start is True and i == 0 and j < activation_potentiation_len:
  act_pop[j] = activation_potentiation[j]
- W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], normalization=normalization, dtype=dtype)
+ W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[j], auto_normalization=auto_normalization, dtype=dtype)
  weight_pop[j] = W

  model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], activation_potentiation=act_pop[j])
@@ -367,9 +361,6 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  return best_weights, best_model[get_preds()], best_acc, final_activations

  progress.update(1)
-
- best_acc_per_gen_list.append(best_acc)
- loss_list.append(best_loss)

  if batch_size != 1:
  train_model = evaluate(x_train, y_train, best_weights, final_activations)
@@ -383,6 +374,13 @@ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=Non
  postfix_dict[f"{data} Loss"] = cp.round(train_loss, 4)
  progress.set_postfix(postfix_dict)

+ best_acc_per_gen_list.append(train_model[get_acc()])
+ loss_list.append(train_loss)
+
+ else:
+ best_acc_per_gen_list.append(best_acc)
+ loss_list.append(best_loss)
+
  weight_pop, act_pop = optimizer(cp.array(weight_pop, copy=False, dtype=dtype), act_pop, i, cp.array(target_pop, dtype=dtype, copy=False), bar_status=False)
  target_pop = []

pyerualjetwork-4.3.9b3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.3.9b1
+ Version: 4.3.9b3
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
pyerualjetwork-4.3.9b3.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- pyerualjetwork/__init__.py,sha256=c1sm6eSTgJQBTjhfEfwJS9QRb_M0Fw_2lKhNGLObtds,641
+ pyerualjetwork/__init__.py,sha256=yNG_jFSSI4rgzfAUhDROUGhgEAmQLOOUeGSHBuYgAJ0,641
  pyerualjetwork/activation_functions.py,sha256=bKf00lsuuLJNO-4vVp4OqBi4zJ-qZ8L3v-vl52notkY,7721
  pyerualjetwork/activation_functions_cuda.py,sha256=5y1Ti3GDfDteQDCUmODwe7tAyDAUlDTKmIikChQ8d6g,7772
  pyerualjetwork/data_operations.py,sha256=Flteouu6rfSo2uHMqBHuzO02dXmbNa-I5qWmUpGTZ5Y,14760
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=MCSCNYiiICRVZITobtS3ZIWmH5Q9gjyELuH32sAdgg4,12649
  pyerualjetwork/model_operations_cuda.py,sha256=NT01BK5nrDYE7H1x3KnSI8gmx0QTGGB0mP_LqEb1uuU,13157
- pyerualjetwork/plan.py,sha256=qafwPgZIWPCYfBaVJpU4K98MGU0ARMOjupNCZkT35bI,23463
- pyerualjetwork/plan_cuda.py,sha256=mwi9nekGghsLKvI2lEQk6PZqcmC6FBEuQ5LpEOrE4JI,24355
+ pyerualjetwork/plan.py,sha256=uK-7GRXqH0ETIajOW6_B6pSyktKmHg1NE4COD7z1f7E,23536
+ pyerualjetwork/plan_cuda.py,sha256=GS9_Bh6htFSxsAGSKm3x7LQQWhCX6GOjDu7qQdS3BM0,24416
  pyerualjetwork/planeat.py,sha256=Lq5R0aMS4UIdZdbUKsKDv5g0WLwYryomR3IQYb8vAa4,37573
  pyerualjetwork/planeat_cuda.py,sha256=SG7Oq1F2m3lJBbG9cgmu7q_ApmwSn2SvTpcbtEVAoDE,37630
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork/visualizations.py,sha256=08O5uEewuYiovZRX1uHWEHjn19LcnhndWYvqVN74xs0,28290
  pyerualjetwork/visualizations_cuda.py,sha256=PYRqj4QYUbuYMYcNwO8yaTPB-jK7E6kZHhTrAi0lwPU,28749
- pyerualjetwork-4.3.9b1.dist-info/METADATA,sha256=O5ISY6Dipfqtm1ep307Umpq_bMUT-T4MajvoGS6F4iI,7476
- pyerualjetwork-4.3.9b1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.3.9b1.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.3.9b1.dist-info/RECORD,,
+ pyerualjetwork-4.3.9b3.dist-info/METADATA,sha256=01diXlEPFpR7H3i1jZe-ACbRo8d6mryS7WtkZmhyBOY,7476
+ pyerualjetwork-4.3.9b3.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.3.9b3.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.3.9b3.dist-info/RECORD,,