pyerualjetwork 4.1.5__py3-none-any.whl → 4.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,18 +1,16 @@
  import psutil
  import numpy as np
  import cupy as cp
- import gc
  import logging

- def get_available_memory():
+ def get_available_cpu_memory():
      """
      The function `get_available_memory` returns the amount of available memory in the system using the
      `psutil` library.
      :return: The function `get_available_memory()` returns the amount of available memory in bytes on
      the system.
      """
-     memory = psutil.virtual_memory().available
-     return memory
+     return psutil.virtual_memory().available

  def get_optimal_batch_size_for_cpu(x, data_size_bytes, available_memory):
      """
@@ -36,62 +34,90 @@ def get_optimal_batch_size_for_cpu(x, data_size_bytes, available_memory):
  def transfer_to_cpu(x, dtype=np.float32):
      """
      The `transfer_to_cpu` function converts data to a specified data type on the CPU, handling memory constraints
-     by batching the conversion process.
-
-     :param x: The `x` parameter in the `transfer_to_cpu` function is the input data that you want to transfer to
-     the CPU. It can be either a NumPy array or any other data structure that supports the `get` method
-     for retrieving the data
-     :param dtype: The `dtype` parameter in the `transfer_to_cpu` function specifies the data type to which the
-     input array `x` should be converted before moving it to the CPU. By default, it is set to
-     `np.float32`, which is a 32-bit floating-point number data type in NumPy
-     :return: The `transfer_to_cpu` function returns the processed data in NumPy array format with the specified
-     data type (`dtype`). If the input `x` is already a NumPy array with the same data type as specified,
-     it returns `x` as is. Otherwise, it converts the input data to the specified data type and returns
-     the processed NumPy array.
+     by batching the conversion process and ensuring complete GPU memory cleanup.
+
+     :param x: Input data to transfer to CPU (CuPy array)
+     :param dtype: Target NumPy dtype for the output array (default: np.float32)
+     :return: NumPy array with the specified dtype
      """
+     from .ui import loading_bars, initialize_loading_bar
      try:
          if isinstance(x, np.ndarray):
              return x.astype(dtype) if x.dtype != dtype else x
-
-         data_size = x.nbytes
-         available_memory = get_available_memory()
+
+         x = x.astype(dtype=dtype, copy=False)

+         data_size = x.nbytes
+         available_memory = get_available_cpu_memory()
          logging.debug(f"Data size: {data_size/1e6:.2f}MB, Available memory: {available_memory/1e6:.2f}MB")

+         pool = cp.get_default_memory_pool()
+         pinned_mempool = cp.cuda.PinnedMemoryPool()
+
          if data_size <= available_memory * 0.25:
-             final_result = np.array(x.get(), dtype=dtype, copy=False)
-             del x
-             cp.get_default_memory_pool().free_all_blocks()
+             try:
+                 final_result = np.array(x.get(), dtype=dtype, copy=False)
+             finally:
+                 del x
+                 pool.free_all_blocks()
+                 pinned_mempool.free_all_blocks()
+                 cp.cuda.runtime.deviceSynchronize()
              return final_result
-
-         batch_size = get_optimal_batch_size_for_cpu(x, data_size, available_memory)
+
+         batch_size = max(get_optimal_batch_size_for_cpu(x, data_size, available_memory), 1)
+         total_batches = (len(x) + batch_size - 1) // batch_size
+         loading_bar = initialize_loading_bar(
+             total=total_batches,
+             desc='Transfering to CPU mem',
+             ncols=70,
+             bar_format=loading_bars()[0],
+             leave=False
+         )
          logging.debug(f"Using batch size: {batch_size}")

-         result = []
-         total_batches = (x.size + batch_size - 1) // batch_size
+         try:
+             sample_chunk = x[0:1]
+             sample_array = np.array(sample_chunk.get(), dtype=dtype)
+             chunk_shape = sample_array.shape[1:] if len(sample_array.shape) > 1 else ()
+             total_shape = (len(x),) + chunk_shape
+         finally:
+             del sample_array
+             del sample_chunk
+             pool.free_all_blocks()
+             pinned_mempool.free_all_blocks()

-         for i in range(0, x.size, batch_size):
-             try:
-                 chunk = x[i:i + batch_size]
-                 result.append(np.array(chunk.get(), dtype=dtype))
-                 del chunk
-
-                 if i > 0 and i % (batch_size * 10) == 0:
-                     cp.get_default_memory_pool().free_all_blocks()
-                     gc.collect()
-
-             except Exception as e:
-                 logging.error(f"Error processing batch {i//batch_size + 1}/{total_batches}: {str(e)}")
-                 raise
+         chunks = np.empty(total_shape, dtype=dtype)
+
+         try:
+             for i in range(0, len(x), batch_size):
+                 try:
+                     end_idx = min(i + batch_size, len(x))
+                     chunk = x[i:end_idx]
+                     chunks[i:end_idx] = chunk.get().astype(dtype=dtype)
+                 finally:
+                     del chunk
+                     pool.free_all_blocks()
+                     pinned_mempool.free_all_blocks()
+                     cp.cuda.runtime.deviceSynchronize()

-         final_result = np.concatenate(result)
-         del x
-         cp.get_default_memory_pool().free_all_blocks()
-         gc.collect()
-         return final_result
+                 loading_bar.update(1)
+         finally:
+             del x
+             pool.free_all_blocks()
+             pinned_mempool.free_all_blocks()
+             cp.cuda.runtime.deviceSynchronize()
+
+         return chunks

      except Exception as e:
          logging.error(f"Error in transfer_to_cpu: {str(e)}")
+         if 'x' in locals():
+             del x
+         if 'pool' in locals():
+             pool.free_all_blocks()
+         if 'pinned_mempool' in locals():
+             pinned_mempool.free_all_blocks()
+         cp.cuda.runtime.deviceSynchronize()
          raise

  def get_optimal_batch_size_for_gpu(x, data_size_bytes):
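The rewrite above replaces the old list-append-then-np.concatenate pattern with a preallocated np.empty destination filled slice by slice, which avoids holding two full copies of the data on the host at peak. A usage sketch, assuming a CUDA device and that the function lives in pyerualjetwork.memory_operations (module path assumed):

    import numpy as np
    import cupy as cp
    from pyerualjetwork.memory_operations import transfer_to_cpu  # module path assumed

    x_gpu = cp.random.rand(10_000, 128)               # data resident on the GPU
    x_cpu = transfer_to_cpu(x_gpu, dtype=np.float32)  # batched device-to-host copy
    print(type(x_cpu), x_cpu.dtype)                   # <class 'numpy.ndarray'> float32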
@@ -129,41 +155,60 @@ def transfer_to_gpu(x, dtype=cp.float32):
      `x` as is. If the data size of `x` exceeds 25% of the free GPU memory, it processes the data in
      batches to
      """
-
+     from .ui import loading_bars, initialize_loading_bar
      try:
          if isinstance(x, cp.ndarray):
              return x.astype(dtype) if x.dtype != dtype else x

+         x = x.astype(dtype=dtype, copy=False)
          data_size = x.nbytes
+         pinned_mempool = cp.cuda.PinnedMemoryPool()
          free_gpu_memory = cp.cuda.runtime.memGetInfo()[0]
-
          logging.debug(f"Data size: {data_size/1e6:.2f}MB, Free GPU memory: {free_gpu_memory/1e6:.2f}MB")
-
+
          if data_size <= free_gpu_memory * 0.25:
              new_x = cp.array(x, dtype=dtype, copy=False)
-             del x
-             gc.collect()
              return new_x

          batch_size = get_optimal_batch_size_for_gpu(x, data_size)
-         logging.debug(f"Using batch size: {batch_size}")
+         if batch_size == 0: batch_size = 1
+
+         loading_bar = initialize_loading_bar(total=len(x)/batch_size, desc='Transfering to GPU mem', ncols=70, bar_format=loading_bars()[0], leave=False)

-         result = []
+         logging.debug(f"Using batch size: {batch_size}")
+         current_threshold = 0.75
          total_batches = (len(x) + batch_size - 1) // batch_size
+
+         sample_chunk = x[0:1]
+         sample_array = cp.array(sample_chunk, dtype=dtype)
+         chunk_shape = sample_array.shape[1:] if len(sample_array.shape) > 1 else ()
+         del sample_array
+         del sample_chunk
+         if chunk_shape:
+             total_shape = (len(x),) + chunk_shape
+         else:
+             total_shape = (len(x),)

+         del chunk_shape
+         chunks = cp.empty(total_shape, dtype=dtype)
+         del total_shape
+
          for i in range(0, len(x), batch_size):
              try:
                  chunk = x[i:i + batch_size]
-                 gpu_chunk = cp.array(chunk, dtype=dtype)
-                 result.append(gpu_chunk)
-
+                 chunk = cp.array(chunk, dtype=dtype)
+                 chunks[i // batch_size] = chunk
                  del chunk
+                 pinned_mempool.free_all_blocks()

                  if i > 0 and i % (batch_size * 5) == 0:
                      pool = cp.get_default_memory_pool()
-                     if pool.used_bytes() > free_gpu_memory * 0.75:
+                     current_threshold = adjust_gpu_memory_threshold(pool, free_gpu_memory, current_threshold)
+                     if pool.used_bytes() > cp.cuda.runtime.memGetInfo()[0] * current_threshold:
                          pool.free_all_blocks()
-                     gc.collect()
+
+
+                 loading_bar.update(1)

              except cp.cuda.memory.OutOfMemoryError:
                  logging.error(f"GPU out of memory at batch {i//batch_size + 1}/{total_batches}")
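For reference, the generic preallocate-and-fill pattern this loop is built around looks like the sketch below, with slice assignment per batch. Note that the hunk above writes `chunks[i // batch_size] = chunk`, i.e. one row index per batch rather than a slice, so this is the textbook pattern, not a transcription of the package code:

    import numpy as np
    import cupy as cp

    def batched_to_gpu(x: np.ndarray, batch_size: int, dtype=cp.float32):
        out = cp.empty(x.shape, dtype=dtype)                # allocate destination once
        for i in range(0, len(x), batch_size):
            end = min(i + batch_size, len(x))
            out[i:end] = cp.asarray(x[i:end], dtype=dtype)  # fill one batch slice
        return out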
@@ -176,12 +221,10 @@ def transfer_to_gpu(x, dtype=cp.float32):
                  raise

          try:
-             final_result = cp.concatenate(result)
-             del result
              del x
-             gc.collect()
              cp.get_default_memory_pool().free_all_blocks()
-             return final_result
+             pinned_mempool.free_all_blocks()
+             return chunks

          except Exception as e:
              logging.error(f"Error concatenating results: {str(e)}")
@@ -190,3 +233,71 @@ def transfer_to_gpu(x, dtype=cp.float32):
      except Exception as e:
          logging.error(f"Error in transfer_to_gpu: {str(e)}")
          raise
+
+ def adjust_gpu_memory_threshold(pool, free_gpu_memory, current_threshold=0.75, min_threshold=0.5, max_threshold=0.9):
+     used_memory = pool.used_bytes()
+     usage_ratio = used_memory / free_gpu_memory
+
+     if usage_ratio > current_threshold:
+         current_threshold = max(min_threshold, current_threshold - 0.05)
+     elif usage_ratio < current_threshold * 0.8:
+         current_threshold = min(max_threshold, current_threshold + 0.05)
+
+     return current_threshold
+
+
+ def optimize_labels(y, one_hot_encoded=True, cuda=False):
+     """
+     The function `optimize_labels` optimizes the data type of labels based on their length and encoding
+     format.
+
+     :param y: The `optimize_labels` function is designed to optimize the data type of the input labels
+     `y` based on certain conditions. The function checks if the labels are in one-hot encoded format or
+     not, and then based on the length of the labels and the specified data types (`uint8`, `uint
+     :param one_hot_encoded: The `one_hot_encoded` parameter in the `optimize_labels` function indicates
+     whether the labels are in one-hot encoded format or not. If `one_hot_encoded` is set to `True`, it
+     means that the labels are in one-hot encoded format, and the function will check the length of the,
+     defaults to True (optional)
+     :param cuda: The `cuda` parameter in the `optimize_labels` function is a boolean flag that indicates
+     whether to use CUDA for computations. If `cuda` is set to `True`, the function will use the CuPy
+     library for array operations, which can leverage GPU acceleration. If `cuda` is `False, defaults to
+     False (optional)
+     :return: The function `optimize_labels` returns the input array `y` after optimizing its data type
+     based on the specified conditions. If `one_hot_encoded` is True, it checks the length of the
+     elements in `y` and converts the data type to uint8, uint16, or uint32 accordingly. If
+     `one_hot_encoded` is False, it checks the length of `y` itself and
+     """
+
+     if cuda: array_type = cp
+     else: array_type = np
+
+     dtype_uint8 = array_type.uint8
+     dtype_uint16 = array_type.uint16
+     dtype_uint32 = array_type.uint32
+
+     if one_hot_encoded:
+         if len(y[0]) < 256:
+             if y.dtype != dtype_uint8:
+                 y = array_type.array(y, copy=False).astype(dtype_uint8, copy=False)
+         elif len(y[0]) <= 32767:
+             if y.dtype != dtype_uint16:
+                 y = array_type.array(y, copy=False).astype(dtype_uint16, copy=False)
+         else:
+             if y.dtype != dtype_uint32:
+                 y = array_type.array(y, copy=False).astype(dtype_uint32, copy=False)
+
+         return y
+
+     else:
+
+         if len(y) < 256:
+             if y.dtype != dtype_uint8:
+                 y = array_type.array(y, copy=False).astype(dtype_uint8, copy=False)
+         elif len(y) <= 32767:
+             if y.dtype != dtype_uint16:
+                 y = array_type.array(y, copy=False).astype(dtype_uint16, copy=False)
+         else:
+             if y.dtype != dtype_uint32:
+                 y = array_type.array(y, copy=False).astype(dtype_uint32, copy=False)
+
+         return y
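Two quick checks of the new helpers. adjust_gpu_memory_threshold nudges the cleanup threshold by 0.05 per call: a usage_ratio of 0.80 against a 0.75 threshold returns max(0.5, 0.75 - 0.05) = 0.70, while a ratio below 0.75 * 0.8 = 0.60 raises it to min(0.9, 0.75 + 0.05) = 0.80. For optimize_labels, the smallest sufficient unsigned type is picked from the class count; a minimal check, assuming the function is importable from pyerualjetwork.memory_operations (module path assumed):

    import numpy as np
    from pyerualjetwork.memory_operations import optimize_labels  # module path assumed

    y = np.eye(10)[np.array([0, 3, 9])]   # 3 one-hot rows, 10 classes, float64
    y = optimize_labels(y, one_hot_encoded=True, cuda=False)
    print(y.dtype)                        # uint8, since len(y[0]) = 10 < 256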
@@ -80,7 +80,7 @@ def roc_curve(y_true, y_score):
          raise ValueError("Only binary classification is supported.")


-     desc_score_indices = cp.argsort(y_score, kind="mergesort")[::-1]
+     desc_score_indices = cp.argsort(y_score, kind="stable")[::-1]
      y_score = y_score[desc_score_indices]
      y_true = y_true[desc_score_indices]

@@ -93,11 +93,10 @@ def roc_curve(y_true, y_score):

      tp = 0
      fp = 0
-     prev_score = None
+     prev_score = 0

-
      for i, score in enumerate(y_score):
-         if score != prev_score:
+         if score is not prev_score:
              fpr.append(fp / n_neg)
              tpr.append(tp / n_pos)
              thresholds.append(score)
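A minimal NumPy sketch of the threshold sweep this hunk touches, kept with value comparison (`!=`). The `+` line above switches to `is not`, which tests object identity; for the array scalars produced by iterating a CuPy array, that is not the same check as value inequality, so the sketch retains the value-based form:

    import numpy as np

    def roc_points(y_true, y_score):
        # Sort scores descending; stable sort keeps tied scores in input order.
        order = np.argsort(y_score, kind="stable")[::-1]
        y_true, y_score = y_true[order], y_score[order]
        n_pos = int(y_true.sum())
        n_neg = len(y_true) - n_pos
        tp = fp = 0
        fpr, tpr, thresholds = [], [], []
        prev = None
        for truth, score in zip(y_true, y_score):
            if score != prev:              # emit one point per distinct threshold
                fpr.append(fp / n_neg)
                tpr.append(tp / n_pos)
                thresholds.append(score)
                prev = score
            tp += int(truth)
            fp += 1 - int(truth)
        fpr.append(1.0); tpr.append(1.0)   # closing (1, 1) point
        return fpr, tpr, thresholds

    print(roc_points(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8])))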
@@ -17,7 +17,7 @@ def save_model(model_name,
                 activation_potentiation=['linear'],
                 weights_type='npy',
                 weights_format='raw',
-                show_architecture=None,
+                show_architecture=False,
                 show_info=True
                 ):

@@ -44,7 +44,7 @@ def save_model(model_name,

      activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']

-     show_architecture (str): It draws model architecture. Takes 2 value='basic' or 'detailed'. Default: None(not drawing)
+     show_architecture (bool): It draws model architecture. True or False. Default: False

      show_info (bool): Prints model details into console. default: True

@@ -170,8 +170,8 @@ def save_model(model_name,

      print(message)

-     if show_architecture is not None:
-         draw_model_architecture(model_name=model_name, model_path=model_path, style=show_architecture)
+     if show_architecture:
+         draw_model_architecture(model_name=model_name, model_path=model_path)



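Usage sketch for the new boolean flag. Only the parameters shown in this diff are certain; the weight argument's name (W) and the rest of the signature are assumptions for illustration:

    import numpy as np
    from pyerualjetwork.model_operations import save_model  # module path assumed

    W = np.random.rand(10, 784)  # hypothetical trained weight matrix
    # 4.1.5 accepted show_architecture='basic' or 'detailed'; 4.1.6 takes a boolean
    # and always uses the single remaining drawing style.
    save_model(model_name='my_plan_model', W=W, show_architecture=True)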
@@ -190,7 +190,6 @@ def load_model(model_name,
      Returns:
      lists: W(list[num]), activation_potentiation, DataFrame of the model
      """
-     np.set_printoptions(threshold=np.Infinity)

      try:

@@ -1,4 +1,5 @@
  import cupy as cp
+ import numpy as np
  from colorama import Fore, Style
  import sys
  from datetime import datetime
@@ -44,7 +45,7 @@ def save_model(model_name,

      activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']

-     show_architecture (str): It draws model architecture. Takes 2 value='basic' or 'detailed'. Default: None(not drawing)
+     show_architecture (bool): It draws model architecture. True or False. Default: False

      show_info (bool): Prints model details into console. default: True

@@ -173,8 +174,8 @@ def save_model(model_name,

      print(message)

-     if show_architecture is not None:
-         draw_model_architecture(model_name=model_name, model_path=model_path, style=show_architecture)
+     if show_architecture:
+         draw_model_architecture(model_name=model_name, model_path=model_path)



@@ -213,12 +214,12 @@ def load_model(model_name,
          if scaler_params_cpu[0] == None: # model not scaled
              scaler_params = scaler_params_cpu[0]

-     except:
+     except: # model scaled
+         scaler_params_cpu = [item for item in scaler_params_cpu if isinstance(item, np.ndarray)]
          scaler_params = cp.array(scaler_params_cpu)
+
          del scaler_params_cpu
          gc.collect()
-         scaler_params = [item for item in scaler_params if isinstance(item, cp.ndarray)] # model scaled
-

      model_name = str(df['MODEL NAME'].iloc[0])
      WeightType = str(df['WEIGHTS TYPE'].iloc[0])
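The reordered load path now filters the pickled scaler parameters on the CPU side, keeping only np.ndarray items, before building a single CuPy array, instead of building first and filtering cp.ndarray items afterwards. A standalone illustration of why the filter runs before cp.array, with made-up values:

    import numpy as np
    import cupy as cp

    scaler_params_cpu = [np.array([0.0, 1.0]), np.array([5.0, 9.0]), None]  # mixed list
    # Drop non-array entries (e.g. None) so cp.array receives a uniform list.
    scaler_params_cpu = [p for p in scaler_params_cpu if isinstance(p, np.ndarray)]
    scaler_params = cp.array(scaler_params_cpu)  # clean (2, 2) array on the GPU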
pyerualjetwork/plan.py CHANGED
@@ -15,6 +15,7 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/

  import numpy as np
  from colorama import Fore
+ import math

  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
@@ -25,8 +26,9 @@ from .metrics import metrics
  from .model_operations import get_acc, get_preds, get_preds_softmax
  from .visualizations import (
      draw_neural_web,
+     update_neural_web_for_fit,
      plot_evaluate,
-     neuron_history,
+     update_neuron_history,
      initialize_visualization_for_fit,
      update_weight_visualization_for_fit,
      update_decision_boundary_for_fit,
@@ -34,7 +36,9 @@ from .visualizations import (
      display_visualization_for_fit,
      display_visualizations_for_learner,
      update_history_plots_for_learner,
-     initialize_visualization_for_learner
+     initialize_visualization_for_learner,
+     update_neuron_history_for_learner,
+     show
  )

  ### GLOBAL VARIABLES ###
@@ -98,20 +102,13 @@ def fit(
      Returns:
      numpyarray([num]): (Weight matrix).
      """
+
+     from model_operations import get_acc
+
      # Pre-checks

      x_train = x_train.astype(dtype, copy=False)

-     if len(y_train[0]) < 256:
-         if y_train.dtype != np.uint8:
-             y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
-     elif len(y_train[0]) <= 32767:
-         if y_train.dtype != np.uint16:
-             y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
-     else:
-         if y_train.dtype != np.uint32:
-             y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
-
      if train_bar and val:
          train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
      elif train_bar and val == False:
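The removed dtype pre-check is the same rule now centralized in optimize_labels (one-hot path) earlier in this release. Note also that the added `from model_operations import get_acc` lacks the package-relative dot used by the module's other imports. A caller-side equivalent of the removed block, assuming the new helper is importable and fit accepts its first two arguments positionally:

    import numpy as np
    from pyerualjetwork.memory_operations import optimize_labels  # module path assumed
    from pyerualjetwork.plan import fit

    x_train = np.random.rand(100, 16).astype(np.float32)
    y_train = np.eye(4)[np.random.randint(0, 4, 100)]  # one-hot labels, 4 classes
    y_train = optimize_labels(y_train, one_hot_encoded=True, cuda=False)  # -> uint8
    W = fit(x_train, y_train)  # labels arrive already downcast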
@@ -137,18 +134,25 @@ def fit(

      # Training process
      for index, inp in enumerate(x_train):
-         inp = np.array(inp, copy=False, dtype=dtype).ravel()
+         inp = np.array(inp, copy=False).ravel()
          y_decoded = decode_one_hot(y_train)
          # Weight updates
          STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
          LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-
-         # Visualization updates
-         if show_training:
-             update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-             if decision_boundary_status:
-                 update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-             update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+         if val and index != 0:
+             if index % math.ceil((val_count / len(x_train)) * 100) == 0:
+                 val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
+                 val_list.append(val_acc)
+
+                 # Visualization updates
+                 if show_training:
+                     update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
+                     if decision_boundary_status:
+                         update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
+                     update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+                     update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
+                 if neurons_history:
+                     update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
          if train_bar:
              train_progress.update(1)

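Worked example of the new validation cadence, `index % math.ceil((val_count / len(x_train)) * 100) == 0`, with illustrative numbers:

    import math

    len_x_train, val_count = 1000, 50
    interval = math.ceil((val_count / len_x_train) * 100)              # ceil(5.0) = 5
    checks = sum(1 for i in range(1, len_x_train) if i % interval == 0)
    print(interval, checks)  # 5 199 -> validates every 5th sample, 199 times per epoch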
@@ -156,7 +160,15 @@ def fit(

      # Finalize visualization
      if show_training:
-         display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+         ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+         ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
+         ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
+         ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
+         show()
+
+     if neurons_history:
+         ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
+         show()

      return normalization(LTPW, dtype=dtype)

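The ani1..ani5 assignments follow the standard matplotlib rule that an animation object must stay referenced until show() runs, or it is garbage-collected and never renders. A generic, self-contained sketch of that pattern (not pyerualjetwork's code):

    import matplotlib.pyplot as plt
    from matplotlib.animation import ArtistAnimation

    fig, ax = plt.subplots()
    frames = [[ax.plot([0, i], [0, i], color='tab:blue')[0]] for i in range(1, 10)]
    ani = ArtistAnimation(fig, frames, interval=100, blit=True)  # keep this reference alive
    plt.show()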
@@ -223,7 +235,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
      tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].

      """
-     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)

      activation_potentiation = all_activations()

@@ -420,7 +432,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat

      if neurons_history:
          viz_objects['neurons']['artists'] = (
-             neuron_history(np.copy(best_weights), viz_objects['neurons']['ax'],
+             update_neuron_history_for_learner(np.copy(best_weights), viz_objects['neurons']['ax'],
                  viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                  y_train[0], viz_objects['neurons']['artists'],
                  data=data, fig1=viz_objects['neurons']['fig'],