pyerualjetwork 4.1.5__py3-none-any.whl → 4.1.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,33 +1,29 @@
  import psutil
  import numpy as np
  import cupy as cp
- import gc
  import logging
 
- def get_available_memory():
+ def get_available_cpu_memory():
      """
      The function `get_available_memory` returns the amount of available memory in the system using the
      `psutil` library.
      :return: The function `get_available_memory()` returns the amount of available memory in bytes on
      the system.
      """
-     memory = psutil.virtual_memory().available
-     return memory
+     return psutil.virtual_memory().available
 
  def get_optimal_batch_size_for_cpu(x, data_size_bytes, available_memory):
      """
      The function calculates the optimal batch size for a given data size and available memory based on
      the size of each element.
 
-     :param x: `x` is a NumPy array representing the input data for which we want to determine the
-     optimal batch size for processing on the CPU
-     :param data_size_bytes: The `data_size_bytes` parameter represents the size of the data in bytes
-     that you want to process in batches
-     :param available_memory: The `available_memory` parameter represents the total memory available on
-     the CPU in bytes. This function calculates the optimal batch size for processing data based on the
-     provided parameters. Let me know if you need any further assistance or explanation!
-     :return: the optimal batch size for a given array `x` based on the available memory and the size of
-     each element in bytes.
+     :param x: `x` is a NumPy array representing the input data for which we want to determine the optimal batch size for processing on the CPU
+
+     :param data_size_bytes: The `data_size_bytes` parameter represents the size of the data in bytes that you want to process in batches
+
+     :param available_memory: The `available_memory` parameter represents the total memory available on the CPU in bytes. This function calculates the optimal batch size for processing data based on the provided parameters. Let me know if you need any further assistance or explanation!
+
+     :return: the optimal batch size for a given array `x` based on the available memory and the size of each element in bytes.
      """
      safe_memory = available_memory * 0.25
      element_size = data_size_bytes / x.size
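
Note: the heuristic above budgets a quarter of the available RAM and divides it by the per-element byte size; the rest of the function body falls outside this hunk. A minimal sketch of one plausible completion that returns a row count, with the max(..., 1) clamp mirroring the guard transfer_to_cpu now applies to the result:

    import numpy as np
    import psutil

    def optimal_cpu_batch_size(x: np.ndarray) -> int:
        # Use only a quarter of free RAM as the working budget.
        safe_memory = psutil.virtual_memory().available * 0.25
        element_size = x.nbytes / x.size                    # bytes per element
        rows = x.shape[0] if x.ndim > 1 else x.size
        elements_per_row = x.size / rows
        # Rows that fit in the budget; never return 0.
        return max(int(safe_memory / (element_size * elements_per_row)), 1)

    x = np.zeros((10_000, 512), dtype=np.float32)
    print(optimal_cpu_batch_size(x))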
@@ -36,62 +32,92 @@ def get_optimal_batch_size_for_cpu(x, data_size_bytes, available_memory):
  def transfer_to_cpu(x, dtype=np.float32):
      """
      The `transfer_to_cpu` function converts data to a specified data type on the CPU, handling memory constraints
-     by batching the conversion process.
+     by batching the conversion process and ensuring complete GPU memory cleanup.
+
+     :param x: Input data to transfer to CPU (CuPy array)
+
+     :param dtype: Target NumPy dtype for the output array (default: np.float32)
 
-     :param x: The `x` parameter in the `transfer_to_cpu` function is the input data that you want to transfer to
-     the CPU. It can be either a NumPy array or any other data structure that supports the `get` method
-     for retrieving the data
-     :param dtype: The `dtype` parameter in the `transfer_to_cpu` function specifies the data type to which the
-     input array `x` should be converted before moving it to the CPU. By default, it is set to
-     `np.float32`, which is a 32-bit floating-point number data type in NumPy
-     :return: The `transfer_to_cpu` function returns the processed data in NumPy array format with the specified
-     data type (`dtype`). If the input `x` is already a NumPy array with the same data type as specified,
-     it returns `x` as is. Otherwise, it converts the input data to the specified data type and returns
-     the processed NumPy array.
+     :return: NumPy array with the specified dtype
      """
+     from .ui import loading_bars, initialize_loading_bar
      try:
          if isinstance(x, np.ndarray):
              return x.astype(dtype) if x.dtype != dtype else x
-
-         data_size = x.nbytes
-         available_memory = get_available_memory()
+
+         x = x.astype(dtype=dtype, copy=False)
 
+         data_size = x.nbytes
+         available_memory = get_available_cpu_memory()
          logging.debug(f"Data size: {data_size/1e6:.2f}MB, Available memory: {available_memory/1e6:.2f}MB")
 
+         pool = cp.get_default_memory_pool()
+         pinned_mempool = cp.cuda.PinnedMemoryPool()
+
          if data_size <= available_memory * 0.25:
-             final_result = np.array(x.get(), dtype=dtype, copy=False)
-             del x
-             cp.get_default_memory_pool().free_all_blocks()
+             try:
+                 final_result = np.array(x.get(), dtype=dtype, copy=False)
+             finally:
+                 del x
+                 pool.free_all_blocks()
+                 pinned_mempool.free_all_blocks()
+                 cp.cuda.runtime.deviceSynchronize()
              return final_result
-
-         batch_size = get_optimal_batch_size_for_cpu(x, data_size, available_memory)
+
+         batch_size = max(get_optimal_batch_size_for_cpu(x, data_size, available_memory), 1)
+         total_batches = (len(x) + batch_size - 1) // batch_size
+         loading_bar = initialize_loading_bar(
+             total=total_batches,
+             desc='Transfering to CPU mem',
+             ncols=70,
+             bar_format=loading_bars()[0],
+             leave=False
+         )
          logging.debug(f"Using batch size: {batch_size}")
 
-         result = []
-         total_batches = (x.size + batch_size - 1) // batch_size
+         try:
+             sample_chunk = x[0:1]
+             sample_array = np.array(sample_chunk.get(), dtype=dtype)
+             chunk_shape = sample_array.shape[1:] if len(sample_array.shape) > 1 else ()
+             total_shape = (len(x),) + chunk_shape
+         finally:
+             del sample_array
+             del sample_chunk
+             pool.free_all_blocks()
+             pinned_mempool.free_all_blocks()
 
-         for i in range(0, x.size, batch_size):
-             try:
-                 chunk = x[i:i + batch_size]
-                 result.append(np.array(chunk.get(), dtype=dtype))
-                 del chunk
-
-                 if i > 0 and i % (batch_size * 10) == 0:
-                     cp.get_default_memory_pool().free_all_blocks()
-                     gc.collect()
-
-             except Exception as e:
-                 logging.error(f"Error processing batch {i//batch_size + 1}/{total_batches}: {str(e)}")
-                 raise
+         chunks = np.empty(total_shape, dtype=dtype)
+
+         try:
+             for i in range(0, len(x), batch_size):
+                 try:
+                     end_idx = min(i + batch_size, len(x))
+                     chunk = x[i:end_idx]
+                     chunks[i:end_idx] = chunk.get().astype(dtype=dtype)
+                 finally:
+                     del chunk
+                     pool.free_all_blocks()
+                     pinned_mempool.free_all_blocks()
+                     cp.cuda.runtime.deviceSynchronize()
 
-         final_result = np.concatenate(result)
-         del x
-         cp.get_default_memory_pool().free_all_blocks()
-         gc.collect()
-         return final_result
+                 loading_bar.update(1)
+         finally:
+             del x
+             pool.free_all_blocks()
+             pinned_mempool.free_all_blocks()
+             cp.cuda.runtime.deviceSynchronize()
+
+         return chunks
 
      except Exception as e:
          logging.error(f"Error in transfer_to_cpu: {str(e)}")
+         if 'x' in locals():
+             del x
+         if 'pool' in locals():
+             pool.free_all_blocks()
+         if 'pinned_mempool' in locals():
+             pinned_mempool.free_all_blocks()
+         cp.cuda.runtime.deviceSynchronize()
          raise
 
  def get_optimal_batch_size_for_gpu(x, data_size_bytes):
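
The rewrite above drops the old append-then-np.concatenate pattern in favor of a preallocated host buffer filled one slice at a time, so the transfer never holds two full copies of the data at its peak. A condensed, self-contained sketch of that pattern (progress bar, shape probing, and pinned-pool bookkeeping omitted; assumes CuPy with a working CUDA device):

    import numpy as np
    import cupy as cp

    def batched_to_cpu(x_gpu: cp.ndarray, batch_size: int, dtype=np.float32) -> np.ndarray:
        # Preallocate the full host buffer once instead of concatenating chunks.
        out = np.empty(x_gpu.shape, dtype=dtype)
        pool = cp.get_default_memory_pool()
        for i in range(0, len(x_gpu), batch_size):
            end = min(i + batch_size, len(x_gpu))
            out[i:end] = x_gpu[i:end].get()   # device -> host copy of one slice
            pool.free_all_blocks()            # return freed device blocks eagerly
        return out

    x = cp.arange(12, dtype=cp.float32).reshape(6, 2)
    print(batched_to_cpu(x, batch_size=2))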
@@ -118,52 +144,66 @@ def transfer_to_gpu(x, dtype=cp.float32):
      The `transfer_to_gpu` function in Python converts input data to GPU arrays, optimizing memory usage by
      batching and handling out-of-memory errors.
 
-     :param x: The `x` parameter in the `transfer_to_gpu` function is the input data that you want to transfer to
-     the GPU for processing. It can be either a NumPy array or a CuPy array. If it's a NumPy array, the
-     function will convert it to a CuPy array and
-     :param dtype: The `dtype` parameter in the `transfer_to_gpu` function specifies the data type to which the
-     input array `x` should be converted when moving it to the GPU. By default, it is set to
-     `cp.float32`, which is a 32-bit floating-point data type provided by the Cu
-     :return: The `transfer_to_gpu` function returns the input data `x` converted to a GPU array of type `dtype`
-     (default is `cp.float32`). If the input `x` is already a GPU array with the same dtype, it returns
-     `x` as is. If the data size of `x` exceeds 25% of the free GPU memory, it processes the data in
-     batches to
-     """
+     :param x: The `x` parameter in the `transfer_to_gpu` function is the input data that you want to transfer to the GPU for processing. It can be either a NumPy array or a CuPy array. If it's a NumPy array, the function will convert it to a CuPy array and
 
+     :param dtype: The `dtype` parameter in the `transfer_to_gpu` function specifies the data type to which the input array `x` should be converted when moving it to the GPU. By default, it is set to `cp.float32`, which is a 32-bit floating-point data type provided by the CuPy
+
+     :return: The `transfer_to_gpu` function returns the input data `x` converted to a GPU array of type `dtype` (default is `cp.float32`). If the input `x` is already a GPU array with the same dtype, it returns `x` as is. If the data size of `x` exceeds 25% of the free GPU memory, it processes the data in batches to
+     """
+     from .ui import loading_bars, initialize_loading_bar
      try:
          if isinstance(x, cp.ndarray):
              return x.astype(dtype) if x.dtype != dtype else x
 
+         x = x.astype(dtype=dtype, copy=False)
          data_size = x.nbytes
+         pinned_mempool = cp.cuda.PinnedMemoryPool()
          free_gpu_memory = cp.cuda.runtime.memGetInfo()[0]
-
          logging.debug(f"Data size: {data_size/1e6:.2f}MB, Free GPU memory: {free_gpu_memory/1e6:.2f}MB")
-
+
          if data_size <= free_gpu_memory * 0.25:
              new_x = cp.array(x, dtype=dtype, copy=False)
-             del x
-             gc.collect()
              return new_x
 
          batch_size = get_optimal_batch_size_for_gpu(x, data_size)
-         logging.debug(f"Using batch size: {batch_size}")
+         if batch_size == 0: batch_size = 1
+
+         loading_bar = initialize_loading_bar(total=len(x)/batch_size, desc='Transfering to GPU mem', ncols=70, bar_format=loading_bars()[0], leave=False)
 
-         result = []
+         logging.debug(f"Using batch size: {batch_size}")
+         current_threshold = 0.75
          total_batches = (len(x) + batch_size - 1) // batch_size
+
+         sample_chunk = x[0:1]
+         sample_array = cp.array(sample_chunk, dtype=dtype)
+         chunk_shape = sample_array.shape[1:] if len(sample_array.shape) > 1 else ()
+         del sample_array
+         del sample_chunk
+         if chunk_shape:
+             total_shape = (len(x),) + chunk_shape
+         else:
+             total_shape = (len(x),)
 
+         del chunk_shape
+         chunks = cp.empty(total_shape, dtype=dtype)
+         del total_shape
+
          for i in range(0, len(x), batch_size):
              try:
                  chunk = x[i:i + batch_size]
-                 gpu_chunk = cp.array(chunk, dtype=dtype)
-                 result.append(gpu_chunk)
-
+                 chunk = cp.array(chunk, dtype=dtype)
+                 chunks[i // batch_size] = chunk
                  del chunk
+                 pinned_mempool.free_all_blocks()
 
                  if i > 0 and i % (batch_size * 5) == 0:
                      pool = cp.get_default_memory_pool()
-                     if pool.used_bytes() > free_gpu_memory * 0.75:
+                     current_threshold = adjust_gpu_memory_threshold(pool, free_gpu_memory, current_threshold)
+                     if pool.used_bytes() > cp.cuda.runtime.memGetInfo()[0] * current_threshold:
                          pool.free_all_blocks()
-                         gc.collect()
+
+
+                 loading_bar.update(1)
 
              except cp.cuda.memory.OutOfMemoryError:
                  logging.error(f"GPU out of memory at batch {i//batch_size + 1}/{total_batches}")
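
The flush check above is now adaptive: adjust_gpu_memory_threshold (defined near the end of this file's changes) lowers the threshold when pool usage runs high and raises it back when usage drops. A GPU-free illustration of how the threshold drifts under that rule, with the memory pool replaced by plain numbers:

    # Same rule as adjust_gpu_memory_threshold, expressed over plain floats.
    def adjust_threshold(used, free, current, lo=0.5, hi=0.9):
        ratio = used / free
        if ratio > current:                # pressure high: flush sooner next time
            return max(lo, current - 0.05)
        if ratio < current * 0.8:          # pressure low: tolerate more usage
            return min(hi, current + 0.05)
        return current

    t = 0.75
    for used_mb in [100, 400, 700, 800, 300]:   # usage against 1000 MB free
        t = adjust_threshold(used_mb, 1000, t)
        print(f"used={used_mb}MB -> threshold={t:.2f}")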
@@ -176,12 +216,10 @@ def transfer_to_gpu(x, dtype=cp.float32):
                  raise
 
          try:
-             final_result = cp.concatenate(result)
-             del result
              del x
-             gc.collect()
              cp.get_default_memory_pool().free_all_blocks()
-             return final_result
+             pinned_mempool.free_all_blocks()
+             return chunks
 
          except Exception as e:
              logging.error(f"Error concatenating results: {str(e)}")
@@ -190,3 +228,71 @@ def transfer_to_gpu(x, dtype=cp.float32):
      except Exception as e:
          logging.error(f"Error in transfer_to_gpu: {str(e)}")
          raise
+
+ def adjust_gpu_memory_threshold(pool, free_gpu_memory, current_threshold=0.75, min_threshold=0.5, max_threshold=0.9):
+     used_memory = pool.used_bytes()
+     usage_ratio = used_memory / free_gpu_memory
+
+     if usage_ratio > current_threshold:
+         current_threshold = max(min_threshold, current_threshold - 0.05)
+     elif usage_ratio < current_threshold * 0.8:
+         current_threshold = min(max_threshold, current_threshold + 0.05)
+
+     return current_threshold
+
+
+ def optimize_labels(y, one_hot_encoded=True, cuda=False):
+     """
+     The function `optimize_labels` optimizes the data type of labels based on their length and encoding
+     format.
+
+     :param y: The `optimize_labels` function is designed to optimize the data type of the input labels
+     `y` based on certain conditions. The function checks if the labels are in one-hot encoded format or
+     not, and then based on the length of the labels and the specified data types (`uint8`, `uint
+     :param one_hot_encoded: The `one_hot_encoded` parameter in the `optimize_labels` function indicates
+     whether the labels are in one-hot encoded format or not. If `one_hot_encoded` is set to `True`, it
+     means that the labels are in one-hot encoded format, and the function will check the length of the,
+     defaults to True (optional)
+     :param cuda: The `cuda` parameter in the `optimize_labels` function is a boolean flag that indicates
+     whether to use CUDA for computations. If `cuda` is set to `True`, the function will use the CuPy
+     library for array operations, which can leverage GPU acceleration. If `cuda` is `False, defaults to
+     False (optional)
+     :return: The function `optimize_labels` returns the input array `y` after optimizing its data type
+     based on the specified conditions. If `one_hot_encoded` is True, it checks the length of the
+     elements in `y` and converts the data type to uint8, uint16, or uint32 accordingly. If
+     `one_hot_encoded` is False, it checks the length of `y` itself and
+     """
+
+     if cuda: array_type = cp
+     else: array_type = np
+
+     dtype_uint8 = array_type.uint8
+     dtype_uint16 = array_type.uint16
+     dtype_uint32 = array_type.uint32
+
+     if one_hot_encoded:
+         if len(y[0]) < 256:
+             if y.dtype != dtype_uint8:
+                 y = array_type.array(y, copy=False).astype(dtype_uint8, copy=False)
+         elif len(y[0]) <= 32767:
+             if y.dtype != dtype_uint16:
+                 y = array_type.array(y, copy=False).astype(dtype_uint16, copy=False)
+         else:
+             if y.dtype != dtype_uint32:
+                 y = array_type.array(y, copy=False).astype(dtype_uint32, copy=False)
+
+         return y
+
+     else:
+
+         if len(y) < 256:
+             if y.dtype != dtype_uint8:
+                 y = array_type.array(y, copy=False).astype(dtype_uint8, copy=False)
+         elif len(y) <= 32767:
+             if y.dtype != dtype_uint16:
+                 y = array_type.array(y, copy=False).astype(dtype_uint16, copy=False)
+         else:
+             if y.dtype != dtype_uint32:
+                 y = array_type.array(y, copy=False).astype(dtype_uint32, copy=False)
+
+         return y
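
optimize_labels narrows the label dtype to the smallest unsigned integer type that still covers the class count (the per-row length for one-hot labels, the overall length otherwise). The saving is easy to see with NumPy alone:

    import numpy as np

    # 10 classes, one-hot encoded: each row has length 10, and 10 < 256,
    # so uint8 suffices (a quarter of the int32 footprint).
    y = np.eye(10, dtype=np.int32)[np.random.randint(0, 10, size=1000)]
    y_small = y.astype(np.uint8, copy=False) if y.dtype != np.uint8 else y
    print(y.nbytes, "->", y_small.nbytes)   # 40000 -> 10000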
@@ -80,7 +80,7 @@ def roc_curve(y_true, y_score):
          raise ValueError("Only binary classification is supported.")
 
 
-     desc_score_indices = cp.argsort(y_score, kind="mergesort")[::-1]
+     desc_score_indices = cp.argsort(y_score, kind="stable")[::-1]
      y_score = y_score[desc_score_indices]
      y_true = y_true[desc_score_indices]
 
@@ -93,11 +93,10 @@ def roc_curve(y_true, y_score):
      tp = 0
      fp = 0
-     prev_score = None
+     prev_score = 0
 
-
      for i, score in enumerate(y_score):
-         if score != prev_score:
+         if score is not prev_score:
              fpr.append(fp / n_neg)
              tpr.append(tp / n_pos)
              thresholds.append(score)
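
Both hunks sit inside the standard single-pass ROC construction: sort the scores in descending order, then emit an (FPR, TPR) point whenever the threshold (score) changes. A compact NumPy-only sketch of that sweep, assuming binary 0/1 labels and using a plain value comparison for the threshold-change test:

    import numpy as np

    def roc_points(y_true, y_score):
        # Stable descending order (stable keeps tied scores in input order).
        order = np.argsort(y_score, kind="stable")[::-1]
        y_true, y_score = y_true[order], y_score[order]
        n_pos = y_true.sum()
        n_neg = len(y_true) - n_pos
        fpr, tpr, thresholds, tp, fp = [], [], [], 0, 0
        prev = None
        for label, score in zip(y_true, y_score):
            if score != prev:              # new threshold: record a curve point
                fpr.append(fp / n_neg)
                tpr.append(tp / n_pos)
                thresholds.append(score)
                prev = score
            tp += label == 1
            fp += label == 0
        fpr.append(fp / n_neg)             # final (1, 1) point
        tpr.append(tp / n_pos)
        return np.array(fpr), np.array(tpr), np.array(thresholds)

    y = np.array([1, 0, 1, 1, 0])
    s = np.array([0.9, 0.8, 0.8, 0.4, 0.2])
    print(roc_points(y, s))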
@@ -17,7 +17,7 @@ def save_model(model_name,
                 activation_potentiation=['linear'],
                 weights_type='npy',
                 weights_format='raw',
-                show_architecture=None,
+                show_architecture=False,
                 show_info=True
                 ):
@@ -44,7 +44,7 @@ def save_model(model_name,
 
          activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
 
-         show_architecture (str): It draws model architecture. Takes 2 value='basic' or 'detailed'. Default: None(not drawing)
+         show_architecture (bool): It draws model architecture. True or False. Default: False
 
          show_info (bool): Prints model details into console. default: True
 
@@ -170,8 +170,8 @@ def save_model(model_name,
 
      print(message)
 
-     if show_architecture is not None:
-         draw_model_architecture(model_name=model_name, model_path=model_path, style=show_architecture)
+     if show_architecture:
+         draw_model_architecture(model_name=model_name, model_path=model_path)
 
 
 
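
Under the new signature, callers toggle the architecture drawing with a boolean instead of a style string. A hypothetical call (the import path and every argument except show_architecture/show_info are illustrative, not taken from this diff):

    import numpy as np
    from pyerualjetwork.model_operations import save_model   # assumed path

    W = np.random.rand(10, 64)             # placeholder weight matrix
    save_model(model_name='my_plan_model',
               W=W,
               show_architecture=True,     # 4.1.5 took style='basic' or 'detailed' here
               show_info=True)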
@@ -190,7 +190,6 @@ def load_model(model_name,
      Returns:
          lists: W(list[num]), activation_potentiation, DataFrame of the model
      """
-     np.set_printoptions(threshold=np.Infinity)
 
      try:
 
@@ -1,4 +1,5 @@
  import cupy as cp
+ import numpy as np
  from colorama import Fore, Style
  import sys
  from datetime import datetime
@@ -44,7 +45,7 @@ def save_model(model_name,
 
          activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: ['linear']
 
-         show_architecture (str): It draws model architecture. Takes 2 value='basic' or 'detailed'. Default: None(not drawing)
+         show_architecture (bool): It draws model architecture. True or False. Default: False
 
          show_info (bool): Prints model details into console. default: True
 
@@ -173,8 +174,8 @@ def save_model(model_name,
 
      print(message)
 
-     if show_architecture is not None:
-         draw_model_architecture(model_name=model_name, model_path=model_path, style=show_architecture)
+     if show_architecture:
+         draw_model_architecture(model_name=model_name, model_path=model_path)
 
 
 
@@ -213,12 +214,12 @@ def load_model(model_name,
          if scaler_params_cpu[0] == None: # model not scaled
              scaler_params = scaler_params_cpu[0]
 
-     except:
+     except: # model scaled
+         scaler_params_cpu = [item for item in scaler_params_cpu if isinstance(item, np.ndarray)]
          scaler_params = cp.array(scaler_params_cpu)
+
          del scaler_params_cpu
          gc.collect()
-         scaler_params = [item for item in scaler_params if isinstance(item, cp.ndarray)] # model scaled
-
 
      model_name = str(df['MODEL NAME'].iloc[0])
      WeightType = str(df['WEIGHTS TYPE'].iloc[0])
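
The reordered cleanup above filters the loaded scaler parameters while they are still NumPy objects, then ships the surviving arrays to the GPU in one cp.array call. A minimal illustration with placeholder values:

    import numpy as np
    import cupy as cp

    # Keep only the NumPy arrays from the loaded row (dropping None
    # placeholders), then move them to the device together.
    scaler_params_cpu = [np.array([0.0, 1.0]), None, np.array([10.0, 20.0])]
    scaler_params_cpu = [p for p in scaler_params_cpu if isinstance(p, np.ndarray)]
    scaler_params = cp.array(scaler_params_cpu)
    print(scaler_params.shape)   # (2, 2): one row per surviving array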
pyerualjetwork/plan.py CHANGED
@@ -15,6 +15,7 @@ ANAPLAN document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_Anaplan/
 
  import numpy as np
  from colorama import Fore
+ import math
 
  ### LIBRARY IMPORTS ###
  from .ui import loading_bars, initialize_loading_bar
@@ -25,8 +26,9 @@ from .metrics import metrics
  from .model_operations import get_acc, get_preds, get_preds_softmax
  from .visualizations import (
      draw_neural_web,
+     update_neural_web_for_fit,
      plot_evaluate,
-     neuron_history,
+     update_neuron_history,
      initialize_visualization_for_fit,
      update_weight_visualization_for_fit,
      update_decision_boundary_for_fit,
@@ -34,7 +36,9 @@ from .visualizations import (
      display_visualization_for_fit,
      display_visualizations_for_learner,
      update_history_plots_for_learner,
-     initialize_visualization_for_learner
+     initialize_visualization_for_learner,
+     update_neuron_history_for_learner,
+     show
  )
 
  ### GLOBAL VARIABLES ###
@@ -98,20 +102,13 @@ def fit(
      Returns:
          numpyarray([num]): (Weight matrix).
      """
+
+     from model_operations import get_acc
+
      # Pre-checks
 
      x_train = x_train.astype(dtype, copy=False)
 
-     if len(y_train[0]) < 256:
-         if y_train.dtype != np.uint8:
-             y_train = np.array(y_train, copy=False).astype(np.uint8, copy=False)
-     elif len(y_train[0]) <= 32767:
-         if y_train.dtype != np.uint16:
-             y_train = np.array(y_train, copy=False).astype(np.uint16, copy=False)
-     else:
-         if y_train.dtype != np.uint32:
-             y_train = np.array(y_train, copy=False).astype(np.uint32, copy=False)
-
      if train_bar and val:
          train_progress = initialize_loading_bar(total=len(x_train), ncols=71, desc='Fitting', bar_format=bar_format_normal)
      elif train_bar and val == False:
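
The dtype-narrowing block removed here duplicates the logic of the new optimize_labels helper added earlier in this diff, so the same narrowing can be applied explicitly before calling fit. Hypothetical usage (the module path is assumed, not shown in this diff):

    import numpy as np
    from pyerualjetwork.memory_operations import optimize_labels   # assumed path

    y_train = np.eye(10)[np.random.randint(0, 10, size=100)]   # dummy one-hot labels
    y_train = optimize_labels(y_train, one_hot_encoded=True, cuda=False)
    print(y_train.dtype)   # uint8, since 10 classes < 256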
@@ -137,18 +134,25 @@ def fit(
 
      # Training process
      for index, inp in enumerate(x_train):
-         inp = np.array(inp, copy=False, dtype=dtype).ravel()
+         inp = np.array(inp, copy=False).ravel()
          y_decoded = decode_one_hot(y_train)
          # Weight updates
          STPW = feed_forward(inp, STPW, is_training=True, Class=y_decoded[index], activation_potentiation=activation_potentiation, LTD=LTD)
          LTPW += normalization(STPW, dtype=dtype) if auto_normalization else STPW
-
-         # Visualization updates
-         if show_training:
-             update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
-             if decision_boundary_status:
-                 update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
-             update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+         if val and index != 0:
+             if index % math.ceil((val_count / len(x_train)) * 100) == 0:
+                 val_acc = evaluate(x_val, y_val, loading_bar_status=False, activation_potentiation=activation_potentiation, W=LTPW)[get_acc()]
+                 val_list.append(val_acc)
+
+                 # Visualization updates
+                 if show_training:
+                     update_weight_visualization_for_fit(vis_objects['ax'][0, 0], LTPW, vis_objects['artist2'])
+                     if decision_boundary_status:
+                         update_decision_boundary_for_fit(vis_objects['ax'][0, 1], x_val, y_val, activation_potentiation, LTPW, vis_objects['artist1'])
+                     update_validation_history_for_fit(vis_objects['ax'][1, 1], val_list, vis_objects['artist3'])
+                     update_neural_web_for_fit(W=LTPW, G=vis_objects['G'], ax=vis_objects['ax'][1, 0], artist=vis_objects['artist4'])
+                 if neurons_history:
+                     update_neuron_history(LTPW, row=vis_objects['row'], col=vis_objects['col'], class_count=len(y_train[0]), fig1=vis_objects['fig1'], ax1=vis_objects['ax1'], artist5=vis_objects['artist5'], acc=val_acc)
          if train_bar:
              train_progress.update(1)
 
@@ -156,7 +160,15 @@ def fit(
 
      # Finalize visualization
      if show_training:
-         display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+         ani1 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist1'], interval)
+         ani2 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist2'], interval)
+         ani3 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist3'], interval)
+         ani4 = display_visualization_for_fit(vis_objects['fig'], vis_objects['artist4'], interval)
+         show()
+
+     if neurons_history:
+         ani5 = display_visualization_for_fit(vis_objects['fig1'], vis_objects['artist5'], interval)
+         show()
 
      return normalization(LTPW, dtype=dtype)
 
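
The in-loop validation added above fires on a fixed stride of math.ceil((val_count / len(x_train)) * 100) samples, skipping index 0. A small arithmetic check of how that stride spaces the checkpoints (values are illustrative):

    import math

    n_train, val_count = 1000, 50
    stride = math.ceil((val_count / n_train) * 100)          # ceil(5.0) = 5
    checkpoints = [i for i in range(1, n_train) if i % stride == 0]
    print(stride, checkpoints[:5], len(checkpoints))         # 5 [5, 10, 15, 20, 25] 199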
@@ -223,7 +235,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
          tuple: A list for model parameters: [Weight matrix, Test loss, Test Accuracy, [Activations functions]].
 
      """
-     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular', 'spiral'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
+     print(Fore.WHITE + "\nRemember, optimization on large datasets can be very time-consuming and computationally expensive. Therefore, if you are working with such a dataset, our recommendation is to include activation function: ['circular'] in the 'except_this' parameter unless absolutely necessary, as they can significantly prolong the process. from: learner\n" + Fore.RESET)
 
      activation_potentiation = all_activations()
 
@@ -420,7 +432,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
      if neurons_history:
          viz_objects['neurons']['artists'] = (
-             neuron_history(np.copy(best_weights), viz_objects['neurons']['ax'],
+             update_neuron_history_for_learner(np.copy(best_weights), viz_objects['neurons']['ax'],
                             viz_objects['neurons']['row'], viz_objects['neurons']['col'],
                             y_train[0], viz_objects['neurons']['artists'],
                             data=data, fig1=viz_objects['neurons']['fig'],