pyerualjetwork 4.2.2b4__py3-none-any.whl → 4.2.2b5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyerualjetwork/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "4.2.2b4"
+ __version__ = "4.2.2b5"
  __update__ = "* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork Homepage: https://github.com/HCB06/PyerualJetwork/tree/main\n* PyerualJetwork document: https://github.com/HCB06/PyerualJetwork/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"

  def print_version(__version__):
pyerualjetwork/plan.py CHANGED
@@ -173,8 +173,8 @@ def fit(
      return normalization(LTPW, dtype=dtype)


- def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
-             neural_web_history=False, show_current_activations=False,
+ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
+             neural_web_history=False, show_current_activations=False, auto_normalization=True,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
              interval=33.33, target_acc=None, target_loss=None,
              start_this_act=None, start_this_W=None, dtype=np.float32):
@@ -203,6 +203,7 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
      model = plan.learner(x_train,
                           y_train,
                           optimizer=genetic_optimizer,
+                          fit_start=True,
                           strategy='accuracy',
                           show_history=True,
                           gen=15,
@@ -210,6 +211,8 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
                           interval=16.67)
      ```

+     fit_start (bool): If True, the initial generation's population undergoes a short training pass with the PLAN algorithm. This gives a much more robust starting point, especially for large and complex datasets; for small or relatively simple datasets it may only add unnecessary computational overhead. The first generation therefore takes slightly longer to complete, but the extra cost applies only to the first generation and does not affect subsequent generations. If False, the initial population is entirely random. Options: True or False. The fit_start parameter is MANDATORY and must be provided.
+
      strategy (str, optional): Learning strategy. Options: 'accuracy' maximizes train (or test, if given) accuracy during learning; 'f1' maximizes f1 score; 'precision' maximizes precision; 'recall' maximizes recall. Default is 'accuracy'.

      gen (int, optional): The generation count for genetic optimization.
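
The new fit_start flag above is mandatory. As a quick illustration of both modes, here is a minimal sketch; the genetic_optimizer wrapper, its PLANEAT entry point, and the toy dataset are assumptions for illustration (the docstring's own example builds a similar wrapper), not part of this diff:

```python
import numpy as np
from pyerualjetwork import plan, planeat

# Hypothetical wrapper: the learner docstring passes a `genetic_optimizer`
# callable that delegates to the PLANEAT genetic optimizer. The exact
# PLANEAT entry point used here is an assumption, not taken from this diff.
def genetic_optimizer(*args, **kwargs):
    return planeat.evolver(*args, **kwargs)

# Toy data: 120 samples, 4 features, 3 one-hot classes (illustration only).
x_train = np.random.rand(120, 4).astype(np.float32)
y_train = np.eye(3, dtype=np.float32)[np.random.randint(0, 3, 120)]

# fit_start=True: generation 0 is seeded with short PLAN fits (one per activation).
model = plan.learner(x_train, y_train, optimizer=genetic_optimizer,
                     fit_start=True, strategy='accuracy', gen=5)

# fit_start=False: generation 0 is a fully random population from define_genomes.
model = plan.learner(x_train, y_train, optimizer=genetic_optimizer,
                     fit_start=False, strategy='accuracy', gen=5)
```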
@@ -218,6 +221,8 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      early_stop (bool, optional): If True, implements early stopping during training (if accuracy does not improve for two generations, learning stops). Default is False.

+     auto_normalization (bool, optional): IMPORTANT: auto_normalization takes effect only if fit_start is True; if fit_start is False its value does not matter. For some datasets, auto_normalization=False gives faster training and noticeably better accuracy. Default is True.
+
      show_current_activations (bool, optional): If True, displays the activations selected by the current strategy during learning. This is very useful if you want to cancel the learning process and resume it later: after canceling, view the live training activations to choose the activations to pass to the 'start_this_act' parameter. Default is False.

      show_history (bool, optional): If True, displays the training history after optimization. Default is False.
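
Since auto_normalization only matters during the PLAN seeding pass, a sketch of the combination worth trying on datasets where normalization hurts (same assumed setup as the example above):

```python
# auto_normalization only affects the generation-0 PLAN seeding pass,
# so it is only meaningful together with fit_start=True; with
# fit_start=False the flag is ignored entirely.
model = plan.learner(x_train, y_train, optimizer=genetic_optimizer,
                     fit_start=True, auto_normalization=False, gen=5)
```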
@@ -249,8 +254,9 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      data = 'Train'

-     activation_potentiation_len = len(all_activations())
-
+     activation_potentiation = all_activations()
+     activation_potentiation_len = len(activation_potentiation)
+
      # Pre-checks

      x_train = x_train.astype(dtype, copy=False)
@@ -261,6 +267,7 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
      if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
+     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')

      # Initialize visualization components
      viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)
@@ -272,9 +279,6 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
      ncols = 89

      # Initialize variables
-     act_pop = []
-     weight_pop = []
-
      best_acc = 0
      best_f1 = 0
      best_recall = 0
@@ -286,11 +290,16 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)

-     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+     if fit_start is False:
+         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)

-     if start_this_act is not None and start_this_W is not None:
-         weight_pop[0] = start_this_W
-         act_pop[0] = start_this_act
+         if start_this_act is not None and start_this_W is not None:
+             weight_pop[0] = start_this_W
+             act_pop[0] = start_this_act
+
+     else:
+         weight_pop = []
+         act_pop = []

      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -303,6 +312,12 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
          for j in range(activation_potentiation_len):

              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
+             if fit_start is True and i == 0:
+                 act_pop.append(activation_potentiation[j])
+                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                 weight_pop.append(W)
+
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype)
              acc = model[get_acc()]

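Taken together, the hunks above replace the unconditional random initialization with a fit_start-dependent one. A condensed sketch of the new generation-0 control flow (simplified from the diff, not the verbatim implementation; progress-bar and metric bookkeeping omitted, and the import paths below are inferred from the package layout):

```python
from pyerualjetwork.plan import fit, evaluate
from pyerualjetwork.activation_functions import all_activations
from pyerualjetwork.data_operations import batcher
from pyerualjetwork.planeat import define_genomes

def seed_and_score(x_train, y_train, fit_start, gen, batch_size,
                   auto_normalization, dtype):
    """Simplified sketch of plan.learner's new generation-0 logic."""
    activation_potentiation = all_activations()

    if fit_start is False:
        # Random starting population, one genome per activation function.
        weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]),
                                             output_shape=len(y_train[0]),
                                             population_size=len(activation_potentiation),
                                             dtype=dtype)
    else:
        weight_pop, act_pop = [], []  # filled lazily during generation 0

    for i in range(gen):
        for j in range(len(activation_potentiation)):
            x_batch, y_batch = batcher(x_train, y_train, batch_size=batch_size)

            if fit_start is True and i == 0:
                # Seed genome j with one short PLAN fit on the current batch.
                act_pop.append(activation_potentiation[j])
                weight_pop.append(fit(x_batch, y_batch,
                                      activation_potentiation=act_pop[-1],
                                      train_bar=False,
                                      auto_normalization=auto_normalization,
                                      dtype=dtype))

            model = evaluate(x_batch, y_batch, W=weight_pop[j],
                             activation_potentiation=act_pop[j],
                             loading_bar_status=False, dtype=dtype)
```

Either way, every genome still corresponds to one activation function; fit_start only changes whether its starting weights are random or PLAN-fitted.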
pyerualjetwork/plan_cuda.py CHANGED
@@ -188,8 +188,8 @@ def fit(
      return normalization(LTPW, dtype=dtype)


- def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_size=1,
-             neural_web_history=False, show_current_activations=False,
+ def learner(x_train, y_train, optimizer, fit_start, strategy='accuracy', gen=None, batch_size=1,
+             neural_web_history=False, show_current_activations=False, auto_normalization=True,
              neurons_history=False, early_stop=False, loss='categorical_crossentropy', show_history=False,
              interval=33.33, target_acc=None, target_loss=None,
              start_this_act=None, start_this_W=None, dtype=cp.float32, memory='gpu'):
@@ -218,13 +218,15 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
      model = plan_cuda.learner(x_train,
                                y_train,
                                optimizer=genetic_optimizer,
+                               fit_start=True,
                                strategy='accuracy',
                                show_history=True,
                                gen=15,
                                batch_size=0.05,
                                interval=16.67)
      ```
-
+     fit_start (bool): If True, the initial generation's population undergoes a short training pass with the PLAN algorithm. This gives a much more robust starting point, especially for large and complex datasets; for small or relatively simple datasets it may only add unnecessary computational overhead. The first generation therefore takes slightly longer to complete, but the extra cost applies only to the first generation and does not affect subsequent generations. If False, the initial population is entirely random. Options: True or False. The fit_start parameter is MANDATORY and must be provided.
+
      strategy (str, optional): Learning strategy. Options: 'accuracy' maximizes train (or test, if given) accuracy during learning; 'f1' maximizes f1 score; 'precision' maximizes precision; 'recall' maximizes recall. Default is 'accuracy'.

      gen (int, optional): The generation count for genetic optimization.
@@ -233,6 +235,8 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      early_stop (bool, optional): If True, implements early stopping during training (if train accuracy does not improve for two generations, learning stops). Default is False.

+     auto_normalization (bool, optional): IMPORTANT: auto_normalization takes effect only if fit_start is True; if fit_start is False its value does not matter. For some datasets, auto_normalization=False gives faster training and noticeably better accuracy. Default is True.
+
      show_current_activations (bool, optional): If True, displays the activations selected by the current strategy during learning. This is very useful if you want to cancel the learning process and resume it later: after canceling, view the live training activations to choose the activations to pass to the 'start_this_act' parameter. Default is False.

      show_history (bool, optional): If True, displays the training history after optimization. Default is False.
@@ -266,7 +270,8 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      data = 'Train'

-     activation_potentiation_len = len(all_activations())
+     activation_potentiation = all_activations()
+     activation_potentiation_len = len(activation_potentiation)

      y_train = optimize_labels(y_train, cuda=True)

@@ -279,7 +284,7 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
      elif memory == 'cpu':
          x_train = transfer_to_cpu(x_train, dtype=dtype)
          y_train = transfer_to_cpu(y_train, dtype=y_train.dtype)
-
+
          from .data_operations import batcher

      else:
@@ -287,7 +292,8 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      if strategy != 'accuracy' and strategy != 'f1' and strategy != 'recall' and strategy != 'precision': raise ValueError("Strategy parameter only be 'accuracy' or 'f1' or 'recall' or 'precision'.")
      if target_acc is not None and (target_acc < 0 or target_acc > 1): raise ValueError('target_acc must be in range 0 and 1')
-
+     if fit_start is not True and fit_start is not False: raise ValueError('fit_start parameter only be True or False. Please read doc-string')
+
      # Initialize visualization components
      viz_objects = initialize_visualization_for_learner(show_history, neurons_history, neural_web_history, x_train, y_train)

@@ -298,9 +304,6 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
      ncols = 99

      # Initialize variables
-     act_pop = []
-     weight_pop = []
-
      best_acc = 0
      best_f1 = 0
      best_recall = 0
@@ -312,11 +315,16 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si

      progress = initialize_loading_bar(total=activation_potentiation_len, desc="", ncols=ncols, bar_format=bar_format_learner)

-     weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len)
+     if fit_start is False:
+         weight_pop, act_pop = define_genomes(input_shape=len(x_train[0]), output_shape=len(y_train[0]), population_size=activation_potentiation_len, dtype=dtype)

-     if start_this_act is not None and start_this_W is not None:
-         weight_pop[0] = start_this_W
-         act_pop[0] = start_this_act
+         if start_this_act is not None and start_this_W is not None:
+             weight_pop[0] = start_this_W
+             act_pop[0] = start_this_act
+
+     else:
+         weight_pop = []
+         act_pop = []

      for i in range(gen):
          postfix_dict["Gen"] = str(i+1) + '/' + str(gen)
@@ -329,6 +337,12 @@ def learner(x_train, y_train, optimizer, strategy='accuracy', gen=None, batch_si
          for j in range(activation_potentiation_len):

              x_train_batch, y_train_batch = batcher(x_train, y_train, batch_size=batch_size)
+
+             if fit_start is True and i == 0:
+                 act_pop.append(activation_potentiation[j])
+                 W = fit(x_train_batch, y_train_batch, activation_potentiation=act_pop[-1], train_bar=False, auto_normalization=auto_normalization, dtype=dtype)
+                 weight_pop.append(W)
+
              model = evaluate(x_train_batch, y_train_batch, W=weight_pop[j], loading_bar_status=False, activation_potentiation=act_pop[j], dtype=dtype, memory=memory)
              acc = model[get_acc()]

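plan_cuda.py receives the same changes as plan.py; the CUDA learner additionally takes a CuPy dtype and a memory switch. A minimal call sketch, under the same assumptions as the CPU example earlier (genetic_optimizer, x_train, y_train):

```python
import cupy as cp
from pyerualjetwork import plan_cuda

# memory='gpu' keeps training data in CuPy arrays on the device;
# memory='cpu' stores it host-side via transfer_to_cpu and batches from there.
model = plan_cuda.learner(x_train, y_train, optimizer=genetic_optimizer,
                          fit_start=True, gen=5, batch_size=0.05,
                          dtype=cp.float32, memory='gpu')
```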
{pyerualjetwork-4.2.2b4.dist-info → pyerualjetwork-4.2.2b5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.2.2b4
+ Version: 4.2.2b5
  Summary: PyerualJetwork is a machine learning library supported with GPU(CUDA) acceleration written in Python for professionals and researchers including with PLAN algorithm, PLANEAT algorithm (genetic optimization). Also includes data pre-process and memory manegament
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
{pyerualjetwork-4.2.2b4.dist-info → pyerualjetwork-4.2.2b5.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- pyerualjetwork/__init__.py,sha256=W84gU5_QHLqxGImXR7xeq2n_LiBCqtoHJTEGoLUPilI,641
+ pyerualjetwork/__init__.py,sha256=jCSoAWmX1GCN4tOGRp2UsvgxojBEUi4abZxyxQ2ygl8,641
  pyerualjetwork/activation_functions.py,sha256=WWOdMd5pI6ZKe-ieKCIsKAYPQODHuXYxx7tzhA5xjes,11767
  pyerualjetwork/activation_functions_cuda.py,sha256=KmXJ5Cdig46XAMYakXFPEOlxSxtFJjD21-i3nGtxPjE,11807
  pyerualjetwork/data_operations.py,sha256=pb5CqJ0Th6fCjTNMCtqQMiwH3KezTxAijacglsKUxmY,14730
@@ -11,14 +11,14 @@ pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,607
  pyerualjetwork/metrics_cuda.py,sha256=73h9GC7XwmnFCVzFEEiPQfF8CwHIz2wsCbxpZrJtYgw,5061
  pyerualjetwork/model_operations.py,sha256=RKqnh7-MByFosxqme4q4jC1lOndX26O-OVXYV6ZxoEE,12965
  pyerualjetwork/model_operations_cuda.py,sha256=XnKKq54ZLaqCm-NaJ6d8IToACKcKg2Ttq6moowVRRWo,13365
- pyerualjetwork/plan.py,sha256=mACNJnHlPkeqsHDYw0BVDXp8PCOlwWsF-1KD4a2Bk7U,30431
- pyerualjetwork/plan_cuda.py,sha256=G4Lo7FnfHfmiv1mZdbT0-BWtjoWUyxViRj6xAj4Hwlc,31784
+ pyerualjetwork/plan.py,sha256=tyV-Xtg6u_aVvPmBAdLct7GnTO73xbp9SHe011Tact0,32186
+ pyerualjetwork/plan_cuda.py,sha256=uzLwJP31zy9qH5QUmbOFH6cv20h_8gwCpvubQTlYB-o,33517
  pyerualjetwork/planeat.py,sha256=hMSyrSPipOxKgOqyoAiZtniVgxPQxc4rRsvEEMOS2Ng,40757
  pyerualjetwork/planeat_cuda.py,sha256=9uopmM-gTZpSb0EOExrOZPT8FF5BqDdEfCX0zYQb9QU,40712
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork/visualizations.py,sha256=QaYSIyVkJZ8NqpBKArQKkI1y37nCQo_KIM98IMssnRc,28766
  pyerualjetwork/visualizations_cuda.py,sha256=F60vQ92AXlMgBka3InXnOtGoM25vQJAlBIU2AlYTwks,29200
- pyerualjetwork-4.2.2b4.dist-info/METADATA,sha256=z6eJ-HSJllMFcpo08y8wg7Ad1cyX0B04egOQqIjwSog,7914
- pyerualjetwork-4.2.2b4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.2.2b4.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.2.2b4.dist-info/RECORD,,
+ pyerualjetwork-4.2.2b5.dist-info/METADATA,sha256=iCMk-fDHiW4yLCHSnWuDx7hJuGtdwWWD_MAUAoVmAmc,7914
+ pyerualjetwork-4.2.2b5.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.2.2b5.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.2.2b5.dist-info/RECORD,,