pyerualjetwork-4.0.3b0-py3-none-any.whl → pyerualjetwork-4.0.4-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
pyerualjetwork/__init__.py CHANGED
@@ -46,7 +46,7 @@ for package_name in package_names:
 
      print(f"PyerualJetwork is ready to use with {err} errors")
 
- __version__ = "4.0.3b0"
+ __version__ = "4.0.4"
  __update__ = "* Note: CUDA modules need cupy. Enter this command in your terminal: 'pip install cupy-cuda12x' or your cuda version.\n* Changes: https://github.com/HCB06/PyerualJetwork/blob/main/CHANGES\n* PyerualJetwork document: https://github.com/HCB06/Anaplan/blob/main/Welcome_to_PyerualJetwork/PYERUALJETWORK_USER_MANUEL_AND_LEGAL_INFORMATION(EN).pdf\n* YouTube tutorials: https://www.youtube.com/@HasanCanBeydili"
 
  def print_version(__version__):
pyerualjetwork/activation_functions_cuda.py CHANGED
@@ -1,5 +1,4 @@
  import cupy as cp
- from scipy.special import expit, softmax
 
  # ACTIVATION FUNCTIONS ----
 
@@ -10,210 +9,207 @@ def all_activations():
      return activations_list
 
  def spiral_activation(x):
+     if x.ndim == 1:
+         r = cp.sqrt(cp.sum(x**2))
+         theta = cp.arctan2(x[1], x[0])
 
-     r = cp.sqrt(cp.sum(x**2))
-
-     theta = cp.arctan2(x[1:], x[:-1])
-
-     spiral_x = r * cp.cos(theta + r)
-     spiral_y = r * cp.sin(theta + r)
-
-
-     spiral_output = cp.concatenate(([spiral_x[0]], spiral_y))
-
-     return spiral_output
+         spiral_x = r * cp.cos(theta + r)
+         spiral_y = r * cp.sin(theta + r)
 
+         spiral_output = cp.array([spiral_x, spiral_y])
+     else:
+         r = cp.sqrt(cp.sum(x**2, axis=-1))
+         theta = cp.arctan2(x[:, 1], x[:, 0])
 
- def Softmax(
-     x # num: Input data to be transformed using softmax function.
- ):
-     """
-     Applies the softmax function to the input data.
+         spiral_x = r * cp.cos(theta + r)
+         spiral_y = r * cp.sin(theta + r)
 
-     Args:
-         (num): Input data to be transformed using softmax function.
-
-     Returns:
-         (num): Transformed data after applying softmax function.
-     """
-
-     return cp.array(softmax(x.get()))
+         spiral_output = cp.stack((spiral_x, spiral_y), axis=-1)
 
-
- def Sigmoid(
-     x # num: Input data to be transformed using sigmoid function.
- ):
-     """
-     Applies the sigmoid function to the input data.
-
-     Args:
-         (num): Input data to be transformed using sigmoid function.
-
-     Returns:
-         (num): Transformed data after applying sigmoid function.
-     """
-     return expit(x)
+     return spiral_output
 
 
- def Relu(
-     x # num: Input data to be transformed using ReLU function.
- ):
-     """
-     Applies the Rectified Linear Unit (ReLU) function to the input data.
+ def Softmax(x):
+     """Optimized Softmax function"""
+     return cp.array(cp.exp(x - cp.max(x, axis=-1, keepdims=True)) / cp.sum(cp.exp(x - cp.max(x, axis=-1, keepdims=True)), axis=-1, keepdims=True))
 
-     Args:
-         (num): Input data to be transformed using ReLU function.
-
-     Returns:
-         (num): Transformed data after applying ReLU function.
-     """
+ def Sigmoid(x):
+     """Optimized Sigmoid function"""
+     return 1 / (1 + cp.exp(-x))
 
+ def Relu(x):
+     """Optimized ReLU function"""
      return cp.maximum(0, x)
 
-
  def tanh(x):
+     """Optimized Tanh function"""
      return cp.tanh(x)
 
  def swish(x):
-     return x * (1 / (1 + cp.exp(-x)))
+     """Optimized Swish function"""
+     return x * Sigmoid(x)
 
  def sin_plus(x):
+     """Optimized SinPlus function"""
      return (cp.sin(x) + 1) / 2
 
  def modular_circular_activation(x, period=2*cp.pi):
+     """Optimized Modular Circular Activation function"""
      return cp.mod(x, period) / period
 
  def tanh_circular_activation(x):
+     """Optimized Tanh Circular Activation function"""
      return (cp.tanh(x) + 1) / 2
 
  def leaky_relu(x, alpha=0.01):
+     """Optimized Leaky ReLU function"""
      return cp.where(x > 0, x, alpha * x)
 
  def softplus(x):
-     return cp.log(1 + cp.exp(x))
+     """Optimized Softplus function"""
+     return cp.log1p(cp.exp(x))
 
  def elu(x, alpha=1.0):
+     """Optimized ELU function"""
      return cp.where(x > 0, x, alpha * (cp.exp(x) - 1))
 
  def gelu(x):
+     """Optimized GELU function"""
      return 0.5 * x * (1 + cp.tanh(cp.sqrt(2 / cp.pi) * (x + 0.044715 * cp.power(x, 3))))
 
  def selu(x, lambda_=1.0507, alpha=1.6733):
+     """Optimized SELU function"""
      return lambda_ * cp.where(x > 0, x, alpha * (cp.exp(x) - 1))
 
  def sinakt(x):
+     """Optimized SinAkt function"""
      return cp.sin(x) + cp.cos(x)
 
  def p_squared(x, alpha=1.0, beta=0.0):
+     """Optimized P-squared function"""
      return alpha * x**2 + beta * x
 
  def sglu(x, alpha=1.0):
-     return cp.array(softmax(alpha * x.get())) * x
+     """Optimized SGU function"""
+     return cp.array(cp.exp(alpha * x)) * x
 
- # 4. Double Leaky ReLU (DLReLU)
  def dlrelu(x):
+     """Optimized Double Leaky ReLU (DLReLU) function"""
      return cp.maximum(0.01 * x, x) + cp.minimum(0.01 * x, 0.1 * x)
 
- # 5. Exponential Sigmoid (ExSig)
  def exsig(x):
+     """Optimized Exponential Sigmoid (ExSig) function"""
      return 1 / (1 + cp.exp(-x**2))
 
- # 6. Adaptive Cosine Activation (ACos)
  def acos(x, alpha=1.0, beta=0.0):
+     """Optimized Adaptive Cosine Activation (ACos) function"""
      return cp.cos(alpha * x + beta)
 
- # 7. Gaussian-like Activation (GLA)
  def gla(x, alpha=1.0, mu=0.0):
+     """Optimized Gaussian-like Activation (GLA) function"""
      return cp.exp(-alpha * (x - mu)**2)
 
- # 8. Swish ReLU (SReLU)
  def srelu(x):
+     """Optimized Swish ReLU (SReLU) function"""
      return x * (1 / (1 + cp.exp(-x))) + cp.maximum(0, x)
 
- # 9. Quadratic Exponential Linear Unit (QELU)
  def qelu(x):
+     """Optimized Quadratic Exponential Linear Unit (QELU) function"""
      return x**2 * cp.exp(x) - 1
 
- # 10. Inverse Square Root Activation (ISRA)
  def isra(x):
+     """Optimized Inverse Square Root Activation (ISRA) function"""
      return x / cp.sqrt(cp.abs(x) + 1)
 
  def waveakt(x, alpha=1.0, beta=2.0, gamma=3.0):
+     """Optimized Wave Activation function"""
      return cp.sin(alpha * x) * cp.cos(beta * x) * cp.sin(gamma * x)
 
  def arctan(x):
+     """Optimized Arctan function"""
      return cp.arctan(x)
 
  def bent_identity(x):
+     """Optimized Bent Identity function"""
      return (cp.sqrt(x**2 + 1) - 1) / 2 + x
 
- def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
-
+ def circular_activation(x, scale=2.0, frequency=1.0, shift=0.0):
+     """Optimized Circular Activation function"""
      n_features = x.shape[0]
-
      circular_output = cp.zeros_like(x)
 
+     r = cp.sqrt(cp.sum(x**2))
      for i in range(n_features):
-
-         r = cp.sqrt(cp.sum(x**2))
          theta = 2 * cp.pi * (i / n_features) + shift
-
          circular_x = r * cp.cos(theta + frequency * r) * scale
          circular_y = r * cp.sin(theta + frequency * r) * scale
 
-         if i % 2 == 0:
-             circular_output[i] = circular_x
-         else:
-             circular_output[i] = circular_y
+         circular_output[i] = circular_x if i % 2 == 0 else circular_y
 
      return circular_output
 
  def sech(x):
+     """Optimized Sech function"""
      return 2 / (cp.exp(x) + cp.exp(-x))
 
  def softsign(x):
+     """Optimized Softsign function"""
      return x / (1 + cp.abs(x))
 
  def pwl(x, alpha=0.5, beta=1.5):
+     """Optimized Piecewise Linear function (PWL)"""
      return cp.where(x <= 0, alpha * x, beta * x)
 
  def cubic(x):
+     """Optimized Cubic function"""
      return x**3
 
  def gaussian(x, alpha=1.0, mu=0.0):
+     """Optimized Gaussian function"""
      return cp.exp(-alpha * (x - mu)**2)
-
+
  def sine(x, alpha=1.0):
+     """Optimized Sine function"""
      return cp.sin(alpha * x)
 
  def tanh_square(x):
+     """Optimized Tanh Square function"""
      return cp.tanh(x)**2
 
  def mod_sigmoid(x, alpha=1.0, beta=0.0):
+     """Optimized Modified Sigmoid function"""
      return 1 / (1 + cp.exp(-alpha * x + beta))
 
  def quartic(x):
+     """Optimized Quartic function"""
      return x**4
 
  def square_quartic(x):
+     """Optimized Square Quartic function"""
      return (x**2)**2
 
  def cubic_quadratic(x):
+     """Optimized Cubic Quadratic function"""
      return x**3 * (x**2)
 
  def exp_cubic(x):
+     """Optimized Exponential Cubic function"""
      return cp.exp(x**3)
 
  def sine_square(x):
+     """Optimized Sine Square function"""
      return cp.sin(x)**2
 
  def logarithmic(x):
+     """Optimized Logarithmic function"""
      return cp.log(x**2 + 1)
 
  def scaled_cubic(x, alpha=1.0):
+     """Optimized Scaled Cubic function"""
      return alpha * x**3
 
  def sine_offset(x, beta=0.0):
+     """Optimized Sine Offset function"""
      return cp.sin(x + beta)
 
  def apply_activation(Input, activation_list):
@@ -221,7 +217,7 @@ def apply_activation(Input, activation_list):
      Applies a sequence of activation functions to the input.
 
      Args:
-         Input (array-like): The input to apply activations to.
+         Input (numpy.ndarray): The input to apply activations to.
          activation_list (list): A list of activation function names to apply.
 
      Returns:
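
The biggest change in this file is dropping the scipy dependency: the old Softmax and Sigmoid round-tripped data through the CPU via x.get(), while the 4.0.4 versions stay on the GPU and subtract the row maximum before exponentiating. A minimal sketch of that max-subtraction trick, assuming a CUDA-capable machine with cupy installed (softmax_stable and the sample logits are illustrative, not part of the package):

    import cupy as cp

    def softmax_stable(x):
        # Subtracting the row-wise max before exponentiating prevents overflow on
        # large logits; the result is mathematically identical to plain softmax.
        e = cp.exp(x - cp.max(x, axis=-1, keepdims=True))
        return e / cp.sum(e, axis=-1, keepdims=True)

    logits = cp.array([[1000.0, 1001.0, 1002.0]])
    print(softmax_stable(logits))  # finite probabilities; naive cp.exp(logits) overflows to inf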
pyerualjetwork/metrics_cuda.py CHANGED
@@ -1,79 +1,54 @@
  import cupy as cp
+ from .data_operations_cuda import decode_one_hot
 
  def metrics(y_ts, test_preds, average='weighted'):
-     """
-     Calculates precision, recall and F1 score for a classification task.
-
-     Args:
-         y_ts (list or numpy.ndarray): True labels.
-         test_preds (list or numpy.ndarray): Predicted labels.
-         average (str): Type of averaging ('micro', 'macro', 'weighted').
-
-     Returns:
-         tuple: Precision, recall, F1 score.
-     """
-
-     from .data_operations_cuda import decode_one_hot
-
-     y_test_d = decode_one_hot(y_ts)
-     y_test_d = cp.array(y_test_d)
+     y_test_d = cp.array(decode_one_hot(y_ts))
      y_pred = cp.array(test_preds)
 
      if y_test_d.ndim > 1:
-         y_test_d = y_test_d.reshape(-1)
+         y_test_d = y_test_d.ravel()
      if y_pred.ndim > 1:
-         y_pred = y_pred.reshape(-1)
-
-     tp = {}
-     fp = {}
-     fn = {}
+         y_pred = y_pred.ravel()
 
      classes = cp.unique(cp.concatenate((y_test_d, y_pred)))
+     tp = cp.zeros(len(classes), dtype=cp.int32)
+     fp = cp.zeros(len(classes), dtype=cp.int32)
+     fn = cp.zeros(len(classes), dtype=cp.int32)
 
-     for c in classes:
-         tp[c] = 0
-         fp[c] = 0
-         fn[c] = 0
-
-     for c in classes:
-         for true, pred in zip(y_test_d, y_pred):
-             if true == c and pred == c:
-                 tp[c] += 1
-             elif true != c and pred == c:
-                 fp[c] += 1
-             elif true == c and pred != c:
-                 fn[c] += 1
-
-     precision = {}
-     recall = {}
-     f1 = {}
-
-     for c in classes:
-         precision[c] = tp[c] / (tp[c] + fp[c]) if (tp[c] + fp[c]) > 0 else 0
-         recall[c] = tp[c] / (tp[c] + fn[c]) if (tp[c] + fn[c]) > 0 else 0
-         f1[c] = 2 * (precision[c] * recall[c]) / (precision[c] + recall[c]) if (precision[c] + recall[c]) > 0 else 0
+     for i, c in enumerate(classes):
+         tp[i] = cp.sum((y_test_d == c) & (y_pred == c))
+         fp[i] = cp.sum((y_test_d != c) & (y_pred == c))
+         fn[i] = cp.sum((y_test_d == c) & (y_pred != c))
+
+     precision = tp / (tp + fp + 1e-10)
+     recall = tp / (tp + fn + 1e-10)
+     f1 = 2 * (precision * recall) / (precision + recall + 1e-10)
 
      if average == 'micro':
-         precision_val = cp.sum(list(tp.values())) / (cp.sum(list(tp.values())) + cp.sum(list(fp.values()))) if (cp.sum(list(tp.values())) + cp.sum(list(fp.values()))) > 0 else 0
-         recall_val = cp.sum(list(tp.values())) / (cp.sum(list(tp.values())) + cp.sum(list(fn.values()))) if (cp.sum(list(tp.values())) + cp.sum(list(fn.values()))) > 0 else 0
-         f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val) if (precision_val + recall_val) > 0 else 0
+         tp_sum = cp.sum(tp)
+         fp_sum = cp.sum(fp)
+         fn_sum = cp.sum(fn)
+         precision_val = tp_sum / (tp_sum + fp_sum + 1e-10)
+         recall_val = tp_sum / (tp_sum + fn_sum + 1e-10)
+         f1_val = 2 * (precision_val * recall_val) / (precision_val + recall_val + 1e-10)
 
      elif average == 'macro':
-         precision_val = cp.mean(list(precision.values()))
-         recall_val = cp.mean(list(recall.values()))
-         f1_val = cp.mean(list(f1.values()))
+         precision_val = cp.mean(precision)
+         recall_val = cp.mean(recall)
+         f1_val = cp.mean(f1)
 
      elif average == 'weighted':
          weights = cp.array([cp.sum(y_test_d == c) for c in classes])
          weights = weights / cp.sum(weights)
-         precision_val = cp.sum([weights[i] * precision[classes[i]] for i in range(len(classes))])
-         recall_val = cp.sum([weights[i] * recall[classes[i]] for i in range(len(classes))])
-         f1_val = cp.sum([weights[i] * f1[classes[i]] for i in range(len(classes))])
+         precision_val = cp.sum(weights * precision)
+         recall_val = cp.sum(weights * recall)
+         f1_val = cp.sum(weights * f1)
 
      else:
          raise ValueError("Invalid value for 'average'. Choose from 'micro', 'macro', 'weighted'.")
 
-     return precision_val, recall_val, f1_val
+     return precision_val.item(), recall_val.item(), f1_val.item()
+
 
 
  def roc_curve(y_true, y_score):
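
The metrics rewrite above replaces a Python loop over every (class, sample) pair with one boolean-mask reduction per class, and swaps the old `if denominator > 0` guards for a small epsilon. A self-contained sketch of the new counting scheme, again assuming cupy is available (the toy labels are illustrative):

    import cupy as cp

    y_true = cp.array([0, 1, 1, 2, 2, 2])
    y_pred = cp.array([0, 1, 2, 2, 2, 1])

    classes = cp.unique(cp.concatenate((y_true, y_pred)))
    tp = cp.zeros(len(classes), dtype=cp.int32)
    fp = cp.zeros(len(classes), dtype=cp.int32)
    fn = cp.zeros(len(classes), dtype=cp.int32)

    for i, c in enumerate(classes):
        # Each comparison runs over the whole label vector on the GPU at once.
        tp[i] = cp.sum((y_true == c) & (y_pred == c))
        fp[i] = cp.sum((y_true != c) & (y_pred == c))
        fn[i] = cp.sum((y_true == c) & (y_pred != c))

    # The 1e-10 epsilon stands in for the old per-class zero-division checks.
    precision = tp / (tp + fp + 1e-10)
    recall = tp / (tp + fn + 1e-10)
    print(precision, recall)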
pyerualjetwork/model_operations.py CHANGED
@@ -190,7 +190,6 @@ def load_model(model_name,
      Returns:
          lists: W(list[num]), activation_potentiation, DataFrame of the model
      """
-     np.set_printoptions(threshold=np.Infinity)
 
      try:
 
pyerualjetwork/plan.py CHANGED
@@ -17,13 +17,13 @@ import numpy as np
  from colorama import Fore
 
  ### LIBRARY IMPORTS ###
- from ui import loading_bars, initialize_loading_bar
- from data_operations import normalization, decode_one_hot, batcher
- from loss_functions import binary_crossentropy, categorical_crossentropy
- from activation_functions import apply_activation, Softmax, all_activations
- from metrics import metrics
- from model_operations import get_acc, get_preds, get_preds_softmax
- from visualizations import (
+ from .ui import loading_bars, initialize_loading_bar
+ from .data_operations import normalization, decode_one_hot, batcher
+ from .loss_functions import binary_crossentropy, categorical_crossentropy
+ from .activation_functions import apply_activation, Softmax, all_activations
+ from .metrics import metrics
+ from .model_operations import get_acc, get_preds, get_preds_softmax
+ from .visualizations import (
      draw_neural_web,
      plot_evaluate,
      neuron_history,
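
The added dots are a correctness fix, not a style change: a bare `from ui import ...` only resolves if plan.py's sibling files happen to sit on sys.path, so importing the installed wheel from any other working directory failed. The relative form resolves the submodules against the pyerualjetwork package itself. A hypothetical reproduction:

    # Before this fix, the next line raised ModuleNotFoundError: No module named 'ui',
    # because 'ui' is not a top-level module once the package is installed.
    from pyerualjetwork import plan  # works in 4.0.4 thanks to the relative imports above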
@@ -500,11 +500,11 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
 
  def feed_forward(
-     Input, # list[num]: Input data.
+     Input, # num: Input data.
      w, # num: Weight matrix of the neural network.
      is_training, # bool: Flag indicating if the function is called during training (True or False).
-     activation_potentiation,
-     Class='?', # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
+     activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
+     Class='?', # int: Which class is, if training.
      LTD=0
  ) -> tuple:
      """
@@ -515,7 +515,7 @@ def feed_forward(
          w (num): Weight matrix of the neural network.
          is_training (bool): Flag indicating if the function is called during training (True or False).
          Class (int): if is during training then which class(label) ? is isnt then put None.
-         # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+         activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
      Returns:
          tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
pyerualjetwork/plan_cuda.py CHANGED
@@ -64,9 +64,9 @@ def fit(
 
      fit Args:
 
-         x_train (list[num]): List or numarray of input data.
+         x_train (list[cupy-array]): List or numarray of input data.
 
-         y_train (list[num]): List or numarray of target labels. (one hot encoded)
+         y_train (list[cupy-array]): List or numarray of target labels. (one hot encoded)
 
          val (None or True): validation in training process ? None or True default: None (optional)
 
@@ -74,9 +74,9 @@ def fit(
 
          activation_potentiation (list): For deeper PLAN networks, activation function parameters. For more information please run this code: plan.activations_list() default: [None] (optional)
 
-         x_val (list[num]): List of validation data. default: x_train (optional)
+         x_val (list[cupy-array]): List of validation data. default: x_train (optional)
 
-         y_val (list[num]): (list[num]): List of target labels. (one hot encoded) default: y_train (optional)
+         y_val (list[cupy-array]): List of target labels. (one hot encoded) default: y_train (optional)
 
          show_training (bool, str): True or None default: None (optional)
 
@@ -251,7 +251,7 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
      if batch_size == 1:
          ncols = 100
      else:
-         ncols = 140
+         ncols = 120
      progress = initialize_loading_bar(total=len(activation_potentiation), desc="", ncols=ncols, bar_format=bar_format_learner)
 
      # Initialize variables
@@ -506,22 +506,22 @@ def learner(x_train, y_train, x_test=None, y_test=None, strategy='accuracy', bat
 
 
  def feed_forward(
-     Input, # list[num]: Input data.
-     w, # num: Weight matrix of the neural network.
+     Input, # cupy-array: Input data.
+     w, # cupy-array: Weight matrix of the neural network.
      is_training, # bool: Flag indicating if the function is called during training (True or False).
-     activation_potentiation,
-     Class='?', # int: Which class is, if training. # (list): Activation potentiation list for deep PLAN. (optional)
+     activation_potentiation, # (list): Activation potentiation list for deep PLAN. (optional)
+     Class='?', # int: Which class is, if training.
      LTD=0
  ) -> tuple:
      """
      Applies feature extraction process to the input data using synaptic potentiation.
 
      Args:
-         Input (num): Input data.
-         w (num): Weight matrix of the neural network.
+         Input (cupy-array): Input data.
+         w (cupy-array): Weight matrix of the neural network.
          is_training (bool): Flag indicating if the function is called during training (True or False).
          Class (int): if is during training then which class(label) ? is isnt then put None.
-         # activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
+         activation_potentiation (list): ac list for deep PLAN. default: [None] ('linear') (optional)
 
      Returns:
          tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
@@ -563,9 +563,9 @@ def evaluate(
      Evaluates the neural network model with the given test data.
 
      Args:
-         x_test (array-like): Test input data.
-         y_test (array-like): Test labels.
-         W (list[array-like]): Neural network weight matrix.
+         x_test (cupy-array): Test input data.
+         y_test (cupy-array): Test labels.
+         W (list[cupy-array]): Neural network weight matrix.
          activation_potentiation (list): Activation functions.
          loading_bar_status (bool): Loading bar status (optional).
          show_metrics (bool): Option to display metrics (optional).
@@ -574,9 +574,6 @@ def evaluate(
          tuple: model.
      """
 
-     x_test = cp.array(x_test, copy=False)
-     y_test = cp.array(y_test, copy=False)
-
      predict_probabilitys = cp.empty((len(x_test), W.shape[0]), dtype=cp.float32)
      real_classes = cp.empty(len(x_test), dtype=cp.int32)
      predict_classes = cp.empty(len(x_test), dtype=cp.int32)
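
Note the removed cp.array(..., copy=False) calls: evaluate in plan_cuda no longer converts its inputs, so (consistent with the cupy-array docstrings above) callers are now expected to move test data to the device themselves. A hedged usage sketch, where x_test_np and y_test_np stand in for your own host-side numpy arrays:

    import cupy as cp

    # Move host data to the GPU once, before calling plan_cuda.evaluate
    x_test = cp.asarray(x_test_np)
    y_test = cp.asarray(y_test_np)  # one-hot encoded labels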
pyerualjetwork-4.0.4.dist-info/METADATA CHANGED
@@ -1,13 +1,13 @@
  Metadata-Version: 2.1
  Name: pyerualjetwork
- Version: 4.0.3b0
+ Version: 4.0.4
  Summary: PyerualJetwork is a machine learning library written in Python for professionals, incorporating advanced, unique, new, and modern techniques.
  Author: Hasan Can Beydili
  Author-email: tchasancan@gmail.com
  Keywords: model evaluation,classification,potentiation learning artificial neural networks,NEAT,genetic algorithms,reinforcement learning,neural networks
  Description-Content-Type: text/markdown
 
- # PyerualJetwork [![Socket Badge](https://socket.dev/api/badge/pypi/package/anaplan/2.5.0?artifact_id=tar-gz)](https://socket.dev/pypi/package/anaplan/overview/2.5.0/tar-gz) [![CodeFactor](https://www.codefactor.io/repository/github/hcb06/anaplan/badge)](https://www.codefactor.io/repository/github/hcb06/anaplan) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan)](https://pepy.tech/projects/anaplan) + [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork)](https://pepy.tech/projects/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan/month)](https://pepy.tech/projects/anaplan) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan/week)](https://pepy.tech/projects/anaplan) [![PyPI version](https://img.shields.io/pypi/v/pyerualjetwork.svg)](https://pypi.org/project/pyerualjetwork/)
+ # PyerualJetwork [![Socket Badge](https://socket.dev/api/badge/pypi/package/pyerualjetwork/4.0.4?artifact_id=tar-gz)](https://socket.dev/pypi/package/pyerualjetwork/overview/4.0.4/tar-gz) [![CodeFactor](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork/badge)](https://www.codefactor.io/repository/github/hcb06/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan)](https://pepy.tech/projects/anaplan) + [![PyPI Downloads](https://static.pepy.tech/badge/pyerualjetwork)](https://pepy.tech/projects/pyerualjetwork) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan/month)](https://pepy.tech/projects/anaplan) [![PyPI Downloads](https://static.pepy.tech/badge/anaplan/week)](https://pepy.tech/projects/anaplan) [![PyPI version](https://img.shields.io/pypi/v/pyerualjetwork.svg)](https://pypi.org/project/pyerualjetwork/)
 
  Note: anaplan old name of pyerualjetwork
 
pyerualjetwork-4.0.4.dist-info/RECORD CHANGED
@@ -1,23 +1,23 @@
- pyerualjetwork/__init__.py,sha256=AqFnWvlLN33ns13c-E0bw_fld7lXv64WqLwU1tfr8AY,2479
+ pyerualjetwork/__init__.py,sha256=fRW4zgPaGddL4vn1jz7QKAPi9tUzaOiEmKGTYuvvG8k,2477
  pyerualjetwork/activation_functions.py,sha256=iJpdsX8FqZ3lB3x-YG7d9-em8xHD0y1ciJLNWmI7Y6A,9941
- pyerualjetwork/activation_functions_cuda.py,sha256=7p-_qZuuj-BZ9A7ds8PgU7hSQ_EGI16XLh4J_6ySkD8,9968
+ pyerualjetwork/activation_functions_cuda.py,sha256=8wV31USrS-9BGI388Ntya10HCucNkV-zm5EH0YL2iRw,10896
  pyerualjetwork/data_operations.py,sha256=mph66_qGQHxhg_gQtTuOzP2PjTwJsxTGzmRmvrzlQn4,12747
  pyerualjetwork/data_operations_cuda.py,sha256=Iy4i5tCZidsnSbyRSX8IuzUMLTDrlz8Gc9nji_RRak0,13642
  pyerualjetwork/help.py,sha256=pZs7hIhgFkovGLle97d9Qu9m5zKhMh7-OAIphIoSxBg,830
  pyerualjetwork/loss_functions.py,sha256=6PyBI232SQRGuFnG3LDGvnv_PUdWzT2_2mUODJiejGI,618
  pyerualjetwork/loss_functions_cuda.py,sha256=C93IZJcrOpT6HMK9x1O4AHJWXYTkN5WZiqdssPbvAPk,617
  pyerualjetwork/metrics.py,sha256=q7MkhnZDRbCjFBDDfUgrl8lBYnUT_1ro1LxeBq105pI,6077
- pyerualjetwork/metrics_cuda.py,sha256=1KKIJunalYfj7OC7AJDXmK4wANrMnyJe_bvbUmhgl_Q,6081
- pyerualjetwork/model_operations.py,sha256=eXFUVZUO6vf_uO4auevWzne1RYSvD6Efz_IdH77DGZc,11980
+ pyerualjetwork/metrics_cuda.py,sha256=TCwn5Z_4jQjqPCURX_xtcz9cjsYVzlahgKDA-qCgpU4,5072
+ pyerualjetwork/model_operations.py,sha256=d1cOuKBYiVMXKrWLJ6zy6rkfVCadzQGNw6OjqwWtBhA,11932
  pyerualjetwork/model_operations_cuda.py,sha256=CAnHj8EQuz2p2oFYcqaa9Z-yJX70rLnFrBkh2sQwrYY,12168
- pyerualjetwork/plan.py,sha256=_AduKIXKH_pkQXRAx_yTV9g7dnKuZvV8cgDg740W6Vw,31525
- pyerualjetwork/plan_cuda.py,sha256=gbOw_wXR5gXEoPUjAES2DB3WZ9xB-miXgFWr6i9zd4Q,31307
+ pyerualjetwork/plan.py,sha256=vfN_HIwU8NepZuB_UKY6nPickOBYWKXd_uymhaTLEoI,31525
+ pyerualjetwork/plan_cuda.py,sha256=vUH6-l-TnxcgIZ704w4_t1gKegTlt4nRTP9SKlGlYRY,31255
  pyerualjetwork/planeat.py,sha256=3l4c-sMqTY6mQvW9u2OarcccUYcMxqASQXgx1GjNZSA,38061
  pyerualjetwork/planeat_cuda.py,sha256=zkXkvdHSYgzV2BSwtpUuUXB6_WbYb_EPL06OfBmRk9w,38094
  pyerualjetwork/ui.py,sha256=wu2BhU1k-w3Kcho5Jtq4SEKe68ftaUeRGneUOSCVDjU,575
  pyerualjetwork/visualizations.py,sha256=DvbiQGlvlKNAgBJ3O3ukAi6uxSheha9SRFh5YX7ZxIA,26678
  pyerualjetwork/visualizations_cuda.py,sha256=dA0u85ZIyKqjtoSJ6p3EbEpJs4V4vS5W5ftR6eif8yg,26713
- pyerualjetwork-4.0.3b0.dist-info/METADATA,sha256=Dol32fciutemeylwk72ivNGHvZ34w0S8eEcRJ_5qNW4,6303
- pyerualjetwork-4.0.3b0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
- pyerualjetwork-4.0.3b0.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
- pyerualjetwork-4.0.3b0.dist-info/RECORD,,
+ pyerualjetwork-4.0.4.dist-info/METADATA,sha256=51txpwSwgc_rPSW1p_XGCi_ntMCt0MyRcwGO5ICRv_Q,6329
+ pyerualjetwork-4.0.4.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-4.0.4.dist-info/top_level.txt,sha256=BRyt62U_r3ZmJpj-wXNOoA345Bzamrj6RbaWsyW4tRg,15
+ pyerualjetwork-4.0.4.dist-info/RECORD,,
+ pyerualjetwork-4.0.4.dist-info/RECORD,,