pyerualjetwork 2.3.8__tar.gz → 2.5.9__tar.gz

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 2.5.9
+ Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -0,0 +1,5 @@
+ # pyerualjetwork/PLAN/__init__.py
+
+ # This file is the main entry point of the plan module.
+
+ from .plan import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization
@@ -1,4 +1,24 @@
 
+ # -*- coding: utf-8 -*-
+ """
+ Created on Tue Jun 18 23:32:16 2024
+
+ @author: hasan
+ """
+
+ # optic
+
+ # -*- coding: utf-8 -*-
+ """
+ Created on Fri Jun 21 05:21:35 2024
+
+ @author: hasan
+ """
+
+ # -*- coding: utf-8 -*-
+
+
+
  """
  Created on Thu Jun 12 00:00:00 2024
 
@@ -20,9 +40,14 @@ import seaborn as sns
 
  def fit(
      x_train: List[Union[int, float]],
-     # At least two.. and one hot encoded
-     y_train: List[Union[int, float, str]], # At least two.. and one hot encoded
-     show_training
+     y_train: List[Union[int, float]], # At least two.. and one hot encoded
+     show_training,
+     show_count=None,
+     val=None,
+     val_count=None,
+     x_val=None,
+     y_val=None,
+     activation_potential=None # (float): Input activation potential (optional)
  ) -> str:
 
      infoPLAN = """
@@ -30,19 +55,50 @@ def fit(
 
      Args:
          x_train (list[num]): List of input data.
-         y_train (list[num]): List of y_train. (one hot encoded)
-         show_training (bool, str): True, None or 'final'
-
+         y_train (list[num]): List of target labels. (one hot encoded)
+         show_training (bool, str): True, None or 'final'
+         show_count (None, int): How many learning steps in total will be displayed in a single figure? (Adjust according to your hardware.) Default: 10 (optional)
+         val_count (None, int): After how many learned examples will an accuracy test be performed? Default: 0.1 (10%) (optional)
+         x_val (list[num]): List of validation data. (optional) Default: x_train
+         y_val (list[num]): List of validation target labels. (one hot encoded) (optional) Default: y_train
+         activation_potential (float): Input activation potential (for binary injection) (optional), in range: -1, 1
      Returns:
          list([num]): (Weight matrices list, train_predictions list, Train_acc).
          error handled ?: Process status ('e')
      """
+
 
      if len(x_train) != len(y_train):
+
          print(Fore.RED + "ERROR301: x_train list and y_train list must be same length. from: fit", infoPLAN)
          return 'e'
+
+     if x_val == None and y_val == None:
+
+         x_val = x_train
+         y_val = y_train
+
+     if val == True and val_count == None:
 
+         val_count = 0.1
+         val_count_copy = val_count
+
+     if val == True:
+
+         val_count = int(len(x_train) * val_count)
+
+         val_count_copy = val_count
+
+     if show_training == True or show_training == 'final' and show_count == None:
+
+         show_count = 10
+
+     if show_training == True or show_training == 'final':
+
+         row, col = shape_control(x_train)
+
      class_count = set()
+
      for sublist in y_train:
 
          class_count.add(tuple(sublist))
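The net effect of the new signature: validation and live plotting are opt-in, and everything defaults to the old behavior. Below is a hedged usage sketch of the 2.5.9 `fit()` call; the toy data and the bare `plan` import (inferred from the new plan/__init__.py and SOURCES.txt) are assumptions, not an excerpt from the release.

```python
# Hypothetical usage sketch for the 2.5.9 fit() parameters shown above.
import numpy as np
import plan

x_train = [np.random.rand(16) for _ in range(100)]   # 100 flattened input vectors
y_train = [[1, 0]] * 50 + [[0, 1]] * 50              # one-hot labels, two classes

# val=True enables the periodic validation pass; with val_count left as None
# it defaults to 0.1, i.e. a check every 10% of the training set, and
# x_val/y_val fall back to x_train/y_train when omitted.
trained_W = plan.fit(x_train, y_train, None, val=True)
```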
@@ -53,20 +109,24 @@ def fit(
 
      neurons = [len(class_count), len(class_count)]
      layers = ['fex']
+     val_list = [None]
 
      x_train[0] = np.array(x_train[0])
      x_train[0] = x_train[0].ravel()
      x_train_size = len(x_train[0])
 
-     W = pdi.weight_identification(
+     W = weight_identification(
          len(layers) - 1, len(class_count), neurons, x_train_size)
 
      trained_W = [1] * len(W)
      print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL)
      start_time = time.time()
-     y = pdi.decode_one_hot(y_train)
+     y = decode_one_hot(y_train)
 
      for index, inp in enumerate(x_train):
+
+         progress = index / len(x_train) * 100
+
          uni_start_time = time.time()
          inp = np.array(inp)
          inp = inp.ravel()
@@ -80,40 +140,97 @@
 
          for Lindex, Layer in enumerate(layers):
 
-             neural_layer = pdi.normalization(neural_layer)
+             neural_layer = normalization(neural_layer)
 
              if Layer == 'fex':
-                 W[Lindex] = pdi.fex(neural_layer, W[Lindex], True, y[index])
+                 W[Lindex] = fex(neural_layer, W[Lindex], True, y[index], activation_potential)
 
          for i, w in enumerate(W):
              trained_W[i] = trained_W[i] + w
 
-         if show_training == True:
-
-             fig, ax = plt.subplots(1, 10, figsize=(18, 14))
-
-             try:
-                 row = x_train[1].shape[0]
-                 col = x_train[1].shape[1]
-             except:
-                 print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
-                 return 'e'
-
-             for j in range(10):
-
-                 mat = trained_W[0][j,:].reshape(row, col)
-
-                 ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-                 ax[j].set_aspect('equal')
+
+         if val == True:
+
+             if index == val_count:
+
+                 val_count += val_count_copy
+
+                 layers.append('cat')
+                 trained_W.append(np.eye(len(class_count)))
+
+                 validation_model = evaluate(x_val, y_val, None, trained_W)
+
+                 layers.pop()
+                 trained_W.pop()
+
+                 val_acc = validation_model[get_acc()]
+
+                 val_list.append(val_acc)
+
+
+                 plt.plot(val_list, linestyle='-',
+                          color='r')
+
+                 progress_status = f"{progress:.1f}"
+                 plt.title('Validation accuracy graph. Amount of data learned by the model: % ' + progress_status)
+                 plt.xlabel('Learning Progress')
+                 plt.ylabel('Accuracy')
+                 plt.ylim(0, 1)
+                 plt.draw()
+                 plt.pause(0.1)
+
+         if show_training == True:
+
+             if index % show_count == 0:
+
+
+                 if index != 0:
+                     plt.close(fig)
+
+                 if row != 0:
+
+                     fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
 
-                 ax[j].set_xticks([])
-                 ax[j].set_yticks([])
-                 ax[j].set_title(f'{j+1}. Neuron')
-
+                 else:
+
+                     fig, ax = plt.subplots(1, 1, figsize=(18, 14))
+
+                 for j in range(len(class_count)):
+
+
+                     if row != 0:
+
+                         mat = trained_W[0][j,:].reshape(row, col)
+                         suptitle_info = 'Neurons Learning Progress: % '
+                         title_info = f'{j+1}. Neuron'
+
+                         mat = trained_W[0][j,:].reshape(row, col)
+
+                         ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+
+                         ax[j].set_aspect('equal')
+
+                         ax[j].set_xticks([])
+                         ax[j].set_yticks([])
+                         ax[j].set_title(title_info)
+
+                     else:
+
+                         mat = trained_W[0]
+                         ax.imshow(mat, interpolation='sinc', cmap='viridis')
+                         suptitle_info = 'Weight Learning Progress: % '
+                         title_info = 'Weight Matrix Of Fex Layer'
+
+
 
-         plt.show()
-
-         W = pdi.weight_identification(
+
+                 progress_status = f"{progress:.1f}"
+                 fig.suptitle(suptitle_info + progress_status)
+                 plt.draw()
+                 plt.pause(0.1)
+
+
+         W = weight_identification(
              len(layers) - 1, len(class_count), neurons, x_train_size)
 
          uni_end_time = time.time()
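The validation pass above works by temporarily appending an identity 'cat' head so that evaluate() sees a complete two-layer network, then popping it so the training state is unchanged. A minimal standalone sketch of why the identity head is a no-op (independent of the package):

```python
# The temporary 'cat' head is an identity matrix, so it passes the fex
# layer's output through unchanged during the validation pass.
import numpy as np

class_count = 3
fex_output = np.array([0.2, 0.7, 0.1])
cat_head = np.eye(class_count)              # temporary identity classifier head
assert np.allclose(cat_head @ fex_output, fex_output)
```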
@@ -133,29 +250,25 @@ def fit(
          print('\rTraining: ', index, "/", len(x_train), "\n", end="")
 
      if show_training == 'final':
-
-         fig, ax = plt.subplots(1, 10, figsize=(18, 14))
-
-         try:
-             row = x_train[1].shape[0]
-             col = x_train[1].shape[1]
-         except:
-             print(Fore.RED + 'ERROR: You try train showing but inputs is raveled. x_train inputs to must be reshape for training_show.', infoPLAN + Style.RESET_ALL)
-             return 'e'
-
-         for j in range(10):
 
-             mat = trained_W[0][j,:].reshape(row, col)
+         fig, ax = plt.subplots(1, len(class_count), figsize=(18, 14))
 
-             ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
-             ax[j].set_aspect('equal')
-
-             ax[j].set_xticks([])
-             ax[j].set_yticks([])
-             ax[j].set_title(f'{j+1}. Neuron')
-
+         for j in range(len(class_count)):
+
+             mat = trained_W[0][j,:].reshape(row, col)
+
+             ax[j].imshow(mat, interpolation='sinc', cmap='viridis')
+             ax[j].set_aspect('equal')
+
+             ax[j].set_xticks([])
+             ax[j].set_yticks([])
+             ax[j].set_title(f'{j+1}. Neuron')
+
+         progress_status = f"{progress:.1f}"
+         fig.suptitle('Neurons Learning Progress: % ' + progress_status)
+         plt.draw()
+         plt.pause(0.1)
 
-         plt.show()
 
 
      EndTime = time.time()
@@ -180,6 +293,39 @@ def fit(
 
  # FUNCTIONS -----
 
+ def shape_control(x_train):
+
+     try:
+         row = x_train[1].shape[0]
+         col = x_train[1].shape[1]
+
+     except:
+
+         print(Fore.MAGENTA + 'WARNING: You trying show_training but inputs is raveled. x_train inputs should be reshaped for show_training.' + Style.RESET_ALL)
+
+         try:
+             row, col = find_numbers(len(x_train[0]))
+
+         except:
+
+             print(Fore.MAGENTA + 'WARNING: Input length cannot be reshaped. Neurons learning progression cannot be draw, weight learning progress drwaing started.' + Style.RESET_ALL)
+             return [0, 0]
+
+     return row, col
+
+ def find_numbers(n):
+     if n <= 1:
+         raise ValueError("Parameter 'n' must be greater than 1.")
+
+     for i in range(2, int(n**0.5) + 1):
+         if n % i == 0:
+             factor1 = i
+             factor2 = n // i
+             if factor1 == factor2:
+                 return factor1, factor2
+
+     return None
+
  def weight_normalization(
      W,
      class_count
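Worth flagging for review: as written, find_numbers() only returns a factor pair when the two factors are equal, i.e. when n is a perfect square; every other n falls through to `return None`, so the `row, col = find_numbers(...)` unpack in shape_control() raises TypeError and the `[0, 0]` fallback is taken. A standalone check (function copied verbatim from the hunk above):

```python
# Behavior check of find_numbers() as released.
def find_numbers(n):
    if n <= 1:
        raise ValueError("Parameter 'n' must be greater than 1.")
    for i in range(2, int(n**0.5) + 1):
        if n % i == 0:
            factor1 = i
            factor2 = n // i
            if factor1 == factor2:
                return factor1, factor2
    return None

print(find_numbers(16))  # (4, 4): only perfect squares return a pair
print(find_numbers(12))  # None: the 2*6 and 3*4 factorizations are skipped
```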
@@ -234,7 +380,8 @@ def fex(
      Input, # list[num]: Input data.
      w, # num: Weight matrix of the neural network.
      is_training, # bool: Flag indicating if the function is called during training (True or False).
-     Class # int: Which class is, if training.
+     Class, # int: Which class is, if training.
+     activation_potential # float or None: Input activation potential (optional)
  ) -> tuple:
      """
      Applies feature extraction process to the input data using synaptic pruning.
@@ -244,22 +391,42 @@ def fex(
          w (num): Weight matrix of the neural network.
          is_training (bool): Flag indicating if the function is called during training (True or False).
          Class (int): if is during training then which class(label) ? is isnt then put None.
+         activation_potential (float or None): Threshold value for comparison. (optional)
 
      Returns:
          tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
      """
 
-     if is_training == True:
-
+     if is_training == True and activation_potential == None:
+
          w[Class, :] = Input
 
          return w
 
-     else:
-
-         neural_layer = np.dot(w, Input)
-
-         return neural_layer
+     elif is_training == True and activation_potential != None:
+
+
+         Input[Input < activation_potential] = 0
+         Input[Input > activation_potential] = 1
+
+         w[Class,:] = Input
+
+         return w
+
+     elif is_training == False and activation_potential == None:
+
+         neural_layer = np.dot(w, Input)
+
+         return neural_layer
+
+     elif is_training == False and activation_potential != None:
+
+         Input[Input < activation_potential] = 0
+         Input[Input > activation_potential] = 1
+
+         neural_layer = np.dot(w, Input)
+
+         return neural_layer
 
 
 
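The four new branches reduce to one idea: when activation_potential is set, the input vector is binarized around the threshold before the existing training or inference logic runs. A minimal NumPy sketch (standalone, with an illustrative threshold):

```python
# Sketch of the activation_potential binarization added to fex(). Note that
# values exactly equal to the threshold match neither mask and pass through
# unchanged.
import numpy as np

Input = np.array([0.2, 0.5, 0.9])
activation_potential = 0.5

Input[Input < activation_potential] = 0
Input[Input > activation_potential] = 1
print(Input)   # [0.  0.5 1. ] -- the 0.5 entry is left as-is
```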
@@ -336,7 +503,8 @@ def evaluate(
      x_test, # list[num]: Test input data.
      y_test, # list[num]: Test labels.
      show_metrices, # show_metrices (bool): (True or False)
-     W # list[num]: Weight matrix list of the neural network.
+     W, # list[num]: Weight matrix list of the neural network.
+     activation_potential=None # activation_potential (float or None): Threshold value for comparison. (optional)
  ) -> tuple:
      infoTestModel = """
      Tests the neural network model with the given test data.
@@ -346,6 +514,7 @@ def evaluate(
          y_test (list[num]): Test labels.
          show_metrices (bool): (True or False)
          W (list[num]): Weight matrix list of the neural network.
+         activation_potential (float or None): Threshold value for comparison. (optional)
 
      Returns:
          tuple: A tuple containing the predicted labels and the accuracy of the model.
@@ -376,7 +545,7 @@ def evaluate(
              neural_layer = normalization(neural_layer)
 
              if Layer == 'fex':
-                 neural_layer = fex(neural_layer, W[index], False, None)
+                 neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
              elif Layer == 'cat':
                  neural_layer = np.dot(W[index], neural_layer)
 
@@ -451,7 +620,8 @@ def multiple_evaluate(
      x_test, # list[num]: Test input data.
      y_test, # list[num]: Test labels.
      show_metrices, # show_metrices (bool): Visualize test progress ? (True or False)
-     MW # list[list[num]]: Weight matrix of the neural network.
+     MW, # list[list[num]]: Weight matrix of the neural network.
+     activation_potential=None # (float or None): Threshold value for comparison. (optional)
  ) -> tuple:
      infoTestModel = """
      Tests the neural network model with the given test data.
@@ -498,7 +668,7 @@ def multiple_evaluate(
              neural_layer = normalization(neural_layer)
 
              if Layer == 'fex':
-                 neural_layer = fex(neural_layer, W[index], False, None)
+                 neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
              elif Layer == 'cat':
                  neural_layer = np.dot(W[index], neural_layer)
 
@@ -582,7 +752,8 @@ def save_model(model_name,
                 weights_format,
                 model_path,
                 scaler_params,
-                W
+                W,
+                activation_potential=None
                 ):
 
      infosave_model = """
@@ -598,6 +769,7 @@ def save_model(model_name,
          model_path (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
          scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
          W: Weights of the model.
+         activation_potential (float or None): Threshold value for comparison. (optional)
 
      Returns:
          str: Message indicating if the model was saved successfully or encountered an error.
@@ -646,7 +818,8 @@ def save_model(model_name,
          'WEIGHTS TYPE': weights_type,
          'WEIGHTS FORMAT': weights_format,
          'MODEL PATH': model_path,
-         'STANDARD SCALER': scaler_params
+         'STANDARD SCALER': scaler_params,
+         'ACTIVATION POTENTIAL': activation_potential
          }
      try:
 
@@ -787,17 +960,34 @@ def predict_model_ssd(Input, model_name, model_path):
      Arguments:
          Input (list or ndarray): Input data for the model (single vector or single matrix).
          model_name (str): Name of the model.
-         model_path (str): Path where the model is saved.
      Returns:
          ndarray: Output from the model.
      """
      W, df = load_model(model_name, model_path)
+
+     activation_potential = str(df['ACTIVATION POTENTIAL'].iloc[0])
 
-     scaler_params = str(df['STANDARD SCALER'].iloc[0])
+     if activation_potential != 'nan':
 
-     if scaler_params != None:
+         activation_potential = float(activation_potential)
+
+     else:
+
+         activation_potential = None
+
+     try:
+
+         scaler_params = df['STANDARD SCALER'].tolist()
+
+
+         scaler_params = [np.fromstring(arr.strip('[]'), sep=' ') for arr in scaler_params]
 
          Input = standard_scaler(None, Input, scaler_params)
+
+     except:
+
+         non_scaled = True
+
 
      layers = ['fex', 'cat']
 
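The `'nan'` string comparison is how the loader distinguishes a saved None from a real float. A hedged sketch of why that works, assuming the model dict is persisted in a tabular text format (e.g. CSV) and reloaded by load_model(); the on-disk format itself is not shown in this diff:

```python
# When activation_potential is saved as None, the cell comes back empty;
# pandas parses an empty numeric cell as float NaN, and str(NaN) == 'nan'.
import io
import pandas as pd

csv = io.StringIO("MODEL NAME,ACTIVATION POTENTIAL\nmy_model,\n")  # empty cell
df = pd.read_csv(csv)                                 # empty cell -> NaN
print(str(df['ACTIVATION POTENTIAL'].iloc[0]))        # 'nan' -> treated as None
```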
@@ -813,7 +1003,7 @@ def predict_model_ssd(Input, model_name, model_path):
              neural_layer = normalization(neural_layer)
 
              if Layer == 'fex':
-                 neural_layer = fex(neural_layer, W[index], False, None)
+                 neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
              elif Layer == 'cat':
                  neural_layer = np.dot(W[index], neural_layer)
      except:
@@ -825,7 +1015,7 @@ def predict_model_ssd(Input, model_name, model_path):
      return neural_layer
 
 
- def predict_model_ram(Input, scaler_params, W):
+ def predict_model_ram(Input, scaler_params, W, activation_potential=None):
 
      infopredict_model_ram = """
      Function to make a prediction using a divided pruning learning artificial neural network (PLAN).
@@ -835,11 +1025,12 @@ def predict_model_ram(Input, scaler_params, W):
          Input (list or ndarray): Input data for the model (single vector or single matrix).
          scaler_params (int, float): standard scaler params list: mean,std. If not used standard scaler then be: None.
          W (list of ndarrays): Weights of the model.
+         activation_potential (float or None): Threshold value for comparison. (optional)
 
      Returns:
          ndarray: Output from the model.
      """
-
+
      if scaler_params != None:
 
          Input = standard_scaler(None, Input, scaler_params)
@@ -858,7 +1049,7 @@ def predict_model_ram(Input, scaler_params, W):
              neural_layer = normalization(neural_layer)
 
              if Layer == 'fex':
-                 neural_layer = fex(neural_layer, W[index], False, None)
+                 neural_layer = fex(neural_layer, W[index], False, None, activation_potential)
              elif Layer == 'cat':
                  neural_layer = np.dot(W[index], neural_layer)
 
@@ -987,35 +1178,44 @@ def standard_scaler(x_train, x_test, scaler_params=None):
      """
      try:
 
-         if scaler_params == None and x_test != None:
-
-             mean = np.mean(x_train, axis=0)
-             std = np.std(x_train, axis=0)
-             train_data_scaled = (x_train - mean) / std
-             test_data_scaled = (x_test - mean) / std
-
-             scaler_params = [mean, std]
-
-             return scaler_params, train_data_scaled, test_data_scaled
+         if scaler_params == None and x_test != None:
 
-         if scaler_params == None and x_test == None:
-
-             mean = np.mean(x_train, axis=0)
-             std = np.std(x_train, axis=0)
-             train_data_scaled = (x_train - mean) / std
-
-             scaler_params = [mean, std]
-
-             return scaler_params, train_data_scaled
-
-         if scaler_params != None:
-             test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
-             return test_data_scaled
+             mean = np.mean(x_train, axis=0)
+             std = np.std(x_train, axis=0)
+             train_data_scaled = (x_train - mean) / std
+             test_data_scaled = (x_test - mean) / std
+
+             train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+             test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+             scaler_params = [mean, std]
+
+             return scaler_params, train_data_scaled, test_data_scaled
+
+         if scaler_params == None and x_test == None:
+
+             mean = np.mean(x_train, axis=0)
+             std = np.std(x_train, axis=0)
+             train_data_scaled = (x_train - mean) / std
+
+             train_data_scaled = np.nan_to_num(train_data_scaled, nan=0)
+
+             scaler_params = [mean, std]
+
+             return scaler_params, train_data_scaled
+
+         if scaler_params != None:
+
+             test_data_scaled = (x_test - scaler_params[0]) / scaler_params[1]
+             test_data_scaled = np.nan_to_num(test_data_scaled, nan=0)
+
+             return test_data_scaled
 
      except:
          print(
              Fore.RED + "ERROR: x_train and x_test must be list[numpyarray] from standard_scaler" + info_standard_scaler)
 
+
  def encode_one_hot(y_train, y_test):
      info_one_hot_encode = """
      Performs one-hot encoding on y_train and y_test data..
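The new np.nan_to_num() calls guard the zero-variance case: a constant feature makes std == 0, so (x - mean) / std is 0/0 = NaN, which would otherwise propagate through every later dot product. A standalone sketch (NumPy emits a divide warning on the 0/0, which is exactly the case being guarded):

```python
# Why the nan_to_num() guards in standard_scaler() matter.
import numpy as np

x_train = np.array([[1.0, 5.0], [1.0, 7.0]])   # first feature is constant
mean = np.mean(x_train, axis=0)
std = np.std(x_train, axis=0)                  # std[0] == 0
scaled = (x_train - mean) / std                # first column becomes NaN
print(np.nan_to_num(scaled, nan=0))            # [[ 0. -1.] [ 0.  1.]]
```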
@@ -1495,4 +1695,4 @@ def get_preds():
 
  def get_acc():
 
-     return 2
+     return 2
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 2.5.9
+ Summary: New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -1,8 +1,6 @@
  setup.py
- plan_bi/__init__.py
- plan_bi/plan_bi.py
- plan_di/__init__.py
- plan_di/plan_di.py
+ plan/__init__.py
+ plan/plan.py
  pyerualjetwork.egg-info/PKG-INFO
  pyerualjetwork.egg-info/SOURCES.txt
  pyerualjetwork.egg-info/dependency_links.txt
@@ -5,10 +5,10 @@ from setuptools import setup, find_packages
  setup(
 
      name = "pyerualjetwork",
-     version = "2.3.8",
+     version = "2.5.9",
      author = "Hasan Can Beydili",
      author_email = "tchasancan@gmail.com",
-     description= "Weights post process function added: [weight_post_process](optional after training before testing.), new function: manuel_balancer. And scaler_params added for scaled models.",
+     description= "New optional parameters added for fit function, x_val, y_val show_count, val_count, val. For more information please read user document",
      packages = find_packages(),
      keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks'],
 
@@ -1,7 +0,0 @@
- Metadata-Version: 2.1
- Name: pyerualjetwork
- Version: 2.3.8
- Summary: Weights post process function added: [weight_post_process](optional after training before testing.), new function: manuel_balancer. And scaler_params added for scaled models.
- Author: Hasan Can Beydili
- Author-email: tchasancan@gmail.com
- Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -1,5 +0,0 @@
- # pyerualjetwork/PLAN/__init__.py
-
- # This file is the main entry point of the plan module.
-
- from .plan_bi import auto_balancer, normalization, Softmax, Sigmoid, Relu, weight_identification, fex, fit, evaluate, save_model, load_model, predict_model_ssd, predict_model_ram, get_weights, get_df, get_preds, get_acc, get_pot, synthetic_augmentation, standard_scaler, multiple_evaluate, encode_one_hot, split, metrics, decode_one_hot, roc_curve, confusion_matrix, plot_evaluate, manuel_balancer, weight_normalization