pyerualjetwork 1.1.9__tar.gz → 1.2.1__tar.gz

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 1.2.1
+ Summary: Advanced python deep learning library.UPDATED TO 1.2.1 NEW FEATURES: GetWeights() func for TrainPLAN, TestPLAN and LoadPLAN, GetPreds() and GetAcc() funcs for TrainPLAN and TestPLAN, GetAllFromLoad() func for LoadPLAN, GetDf() for LoadPLAN (Documentation in desc. Examples in GİTHUB: HCB06)
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -3,10 +3,12 @@ import time
  from colorama import Fore,Style
  from typing import List, Union
  import math
+ from scipy.special import expit, softmax
+
  # BUILD -----
  def TrainPLAN(
- Inputs: List[Union[int, float]],
- Labels: List[Union[int, float, str]], # At least two.. and one hot encoded
+ TrainInputs: List[Union[int, float]],
+ TrainLabels: List[Union[int, float, str]], # At least two.. and one hot encoded
  ClassCount: int,
  Layers: List[str],
  Neurons: List[Union[int, float]],
@@ -20,8 +22,8 @@ def TrainPLAN(
  Creates and configures a PLAN model.

  Args:
- Inputs (list[num]): List of input data.
- Labels (list[num]): List of labels. (one hot encoded)
+ TrainInputs (list[num]): List of input data.
+ TrainLabels (list[num]): List of TrainLabels. (one hot encoded)
  ClassCount (int): Number of classes.
  Layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
  Neurons (list[num]): List of neuron counts for each layer.
@@ -31,7 +33,7 @@ def TrainPLAN(
  Activations (list[str]): List of activation functions.

  Returns:
- list([num]): (Weight matrices list, TrainPredictions list.).
+ list([num]): (Weight matrices list, TrainPredictions list, TrainAcc).
  error handled ?: Process status ('e')
  """

@@ -45,8 +47,8 @@ def TrainPLAN(
  print(Fore.RED + "ERROR307: Normalization list length must be equal to length of ThresholdSigns List,ThresholdValues List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
  return 'e'

- if len(Inputs) != len(Labels):
- print(Fore.RED + "ERROR301: Inputs list and Labels list must be same length.",infoPLAN)
+ if len(TrainInputs) != len(TrainLabels):
+ print(Fore.RED + "ERROR301: TrainInputs list and TrainLabels list must be same length.",infoPLAN)
  return 'e'

  for i, Value in enumerate(ThresholdValues):
@@ -117,45 +119,45 @@ def TrainPLAN(
  print(Fore.RED + "ERROR109: at layer type 'fex', pairing with 'none' Threshold is not acceptlable. if you want to 'none' put '==' and make threshold value '0'. from: TrainPLAN ",infoPLAN)
  return 'e'

- UniqueLabels = set()
- for sublist in Labels:
+ UniqueTrainLabels = set()
+ for sublist in TrainLabels:

- UniqueLabels.add(tuple(sublist))
+ UniqueTrainLabels.add(tuple(sublist))


- UniqueLabels = list(UniqueLabels)
+ UniqueTrainLabels = list(UniqueTrainLabels)

- Labels = [tuple(sublist) for sublist in Labels]
+ TrainLabels = [tuple(sublist) for sublist in TrainLabels]


- if len(UniqueLabels) != ClassCount:
+ if len(UniqueTrainLabels) != ClassCount:
  print(Fore.RED + "ERROR106: Label variety length must be same Class Count. from: TrainPLAN",infoPLAN)
  return 'e'

- Inputs[0] = np.array(Inputs[0])
- Inputs[0] = Inputs[0].ravel()
- InputSize = len(Inputs[0])
+ TrainInputs[0] = np.array(TrainInputs[0])
+ TrainInputs[0] = TrainInputs[0].ravel()
+ TrainInputsize = len(TrainInputs[0])

- W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,InputSize)
+ W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,TrainInputsize)
  Divides = SynapticDividing(ClassCount,W)
  TrainedWs = [1] * len(W)
  print(Fore.GREEN + "Train Started with 0 ERROR" + Style.RESET_ALL,)
- TrainPredictions = [1] * len(Labels)
+ TrainPredictions = [1] * len(TrainLabels)
  true = 0
  StartTime = time.time()
- for index, inp in enumerate(Inputs):
+ for index, inp in enumerate(TrainInputs):
  UniStartTime = time.time()
  inp = np.array(inp)
  inp = inp.ravel()

- if InputSize != len(inp):
- print(Fore.RED +"ERROR304: All input matrices or vectors in inputs list, must be same size. from: TrainPLAN",infoPLAN + Style.RESET_ALL)
+ if TrainInputsize != len(inp):
+ print(Fore.RED +"ERROR304: All input matrices or vectors in TrainInputs list, must be same size. from: TrainPLAN",infoPLAN + Style.RESET_ALL)
  return 'e'


- for Ulindex, Ul in enumerate(UniqueLabels):
+ for Ulindex, Ul in enumerate(UniqueTrainLabels):

- if Ul == Labels[index]:
+ if Ul == TrainLabels[index]:
  for Windex, w in enumerate(W):
  for i, ul in enumerate(Ul):
  if ul == 1.0:
@@ -183,12 +185,12 @@ def TrainPLAN(
  elif Layer == 'cat':
  NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], ThresholdSigns[Lindex], ThresholdValues[Lindex],1)

- RealOutput = np.argmax(Labels[index])
+ RealOutput = np.argmax(TrainLabels[index])
  PredictedOutput = np.argmax(NeuralLayer)
  if RealOutput == PredictedOutput:
  true += 1
- Acc = true / len(Labels)
- TrainPredictions[index] = PredictedOutput+1
+ Acc = true / len(TrainLabels)
+ TrainPredictions[index] = PredictedOutput

  if index == 0:
  for i, w in enumerate(W):
@@ -199,12 +201,12 @@ def TrainPLAN(
  TrainedWs[i] = TrainedWs[i] + w


- W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,InputSize)
+ W = WeightIdentification(len(Layers) - 1,ClassCount,Neurons,TrainInputsize)


  UniEndTime = time.time()

- CalculatingEst = round((UniEndTime - UniStartTime) * (len(Inputs) - index),3)
+ CalculatingEst = round((UniEndTime - UniStartTime) * (len(TrainInputs) - index),3)

  if CalculatingEst < 60:
  print('\rest......(sec):',CalculatingEst,'\n',end= "")
@@ -245,7 +247,7 @@ def TrainPLAN(



- return TrainedWs,TrainPredictions
+ return TrainedWs,TrainPredictions,Acc

  # FUNCTIONS -----

@@ -253,7 +255,7 @@ def WeightIdentification(
  LayerCount, # int: Number of layers in the neural network.
  ClassCount, # int: Number of classes in the classification task.
  Neurons, # list[num]: List of neuron counts for each layer.
- InputSize # int: Size of the input data.
+ TrainInputsize # int: Size of the input data.
  ) -> str:
  """
  Identifies the weights for a neural network model.
@@ -262,7 +264,7 @@ def WeightIdentification(
  LayerCount (int): Number of layers in the neural network.
  ClassCount (int): Number of classes in the classification task.
  Neurons (list[num]): List of neuron counts for each layer.
- InputSize (int): Size of the input data.
+ TrainInputsize (int): Size of the input data.

  Returns:
  list([numpy_arrays],[...]): Weight matices of the model. .
@@ -271,7 +273,7 @@ def WeightIdentification(

  Wlen = LayerCount + 1
  W = [None] * Wlen
- W[0] = np.ones((Neurons[0],InputSize))
+ W[0] = np.ones((Neurons[0],TrainInputsize))
  ws = LayerCount - 1
  for w in range(ws):
  W[w + 1] = np.ones((Neurons[w + 1],Neurons[w]))
@@ -492,19 +494,8 @@ def Softmax(
  Returns:
  list[num]: Transformed data after applying softmax function.
  """
-
-
- MaxX = np.max(x)
-
- x -= MaxX
-
- ExpX = np.exp(x)
-
- SumExpX = np.sum(ExpX)

- SoftmaxX = ExpX / SumExpX
-
- return SoftmaxX
+ return softmax(x)


  def Sigmoid(
@@ -519,18 +510,7 @@ def Sigmoid(
  Returns:
  list[num]: Transformed data after applying sigmoid function.
  """
-
- PositiveMask = x >= 0
- NegativeMask = ~PositiveMask
-
- ExpXPositive = np.exp(-np.clip(x[PositiveMask], -709, None))
- ExpXNegative = np.exp(np.clip(x[NegativeMask], None, 709))
-
- SigmoidX = np.zeros_like(x, dtype=float)
- SigmoidX[PositiveMask] = 1 / (1 + ExpXPositive)
- SigmoidX[NegativeMask] = ExpXNegative / (1 + ExpXNegative)
-
- return SigmoidX
+ return expit(x)


  def Relu(
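Editorial note (not part of the diff): the two hunks above replace the hand-rolled, overflow-guarded Softmax and Sigmoid bodies with scipy.special.softmax and scipy.special.expit, the imports added at the top of the module. A minimal, self-contained check that the SciPy routines (SciPy 1.2.0 or newer for softmax) are numerically stable drop-ins:

    import numpy as np
    from scipy.special import expit, softmax

    x = np.array([-1000.0, 0.0, 1000.0])
    print(expit(x))    # [0.  0.5 1. ]  sigmoid without overflow warnings
    print(softmax(x))  # [0. 0. 1.]     max-subtraction handled internally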
@@ -557,7 +537,7 @@ def TestPLAN(
  ThresholdSigns, # list[str]: List of threshold signs for each layer.
  ThresholdValues, # list[num]: List of threshold values for each layer.
  Normalizations, # str: Whether normalization will be performed ("y" or "n").
- Activation, # str: Activation function list for the neural network.
+ Activations, # str: Activation function list for the neural network.
  W # list[list[num]]: Weight matrix of the neural network.
  ) -> tuple:
  infoTestModel = """
@@ -597,11 +577,11 @@ def TestPLAN(
  for index, Layer in enumerate(Layers):
  if Normalizations[index] == 'y':
  NeuralLayer = Normalization(NeuralLayer)
- if Activation[index] == 'relu':
+ if Activations[index] == 'relu':
  NeuralLayer = Relu(NeuralLayer)
- elif Activation[index] == 'sigmoid':
+ elif Activations[index] == 'sigmoid':
  NeuralLayer = Sigmoid(NeuralLayer)
- elif Activation[index] == 'softmax':
+ elif Activations[index] == 'softmax':
  NeuralLayer = Softmax(NeuralLayer)

  if Layers[index] == 'fex':
@@ -615,7 +595,7 @@ def TestPLAN(
  if RealOutput == PredictedOutput:
  true += 1
  Acc = true / len(TestLabels)
- TestPredictions[inpIndex] = PredictedOutput+1
+ TestPredictions[inpIndex] = PredictedOutput
  UniEndTime = time.time()

  CalculatingEst = round((UniEndTime - UniStartTime) * (len(TestInputs) - inpIndex),3)
@@ -663,7 +643,7 @@ def TestPLAN(
  print(Fore.RED + "ERROR: Testing model parameters like 'Layers' 'ThresholdCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
  return 'e'

- return TestPredictions,Acc
+ return W,TestPredictions,Acc

  def SavePLAN(ModelName,
  ModelType,
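Editorial note on the TestPLAN hunks above (not part of the diff): in 1.2.1 TestPLAN returns the weight list W as a new first element, and both TrainPLAN and TestPLAN now record raw np.argmax class indices rather than index + 1. A minimal sketch of the new return shape only, using a stub in place of the real function (the full argument list is the one shown in the signature hunk above):

    import numpy as np

    def TestPLANStub(W):
        # Placeholder values; the real TestPLAN computes these from TestInputs/TestLabels.
        TestPredictions = [int(np.argmax([0.1, 0.9])), int(np.argmax([0.8, 0.2]))]  # 0-based class indices
        Acc = 0.5
        return W, TestPredictions, Acc  # 1.1.9 returned only (TestPredictions, Acc)

    W, TestPreds, TestAcc = TestPLANStub([np.ones((2, 4))])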
@@ -676,8 +656,8 @@ def SavePLAN(ModelName,
  TestAcc,
  LogType,
  WeightsType,
- WeightFormat,
- SavePath,
+ WeightsFormat,
+ ModelPath,
  W
  ):

@@ -697,7 +677,7 @@ def SavePLAN(ModelName,
  LogType (str): Type of log to save (options: 'csv', 'txt', 'hdf5').
  WeightsType (str): Type of weights to save (options: 'txt', 'npy', 'mat').
  WeightFormat (str): Format of the weights (options: 'd', 'f', 'raw').
- SavePath (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
+ ModelPath (str): Path where the model will be saved. For example: C:/Users/beydili/Desktop/denemePLAN/
  W: Weights of the model.

  Returns:
@@ -715,7 +695,7 @@ def SavePLAN(ModelName,
  print(Fore.RED + "ERROR110: Save Weight type (File Extension) Type must be 'txt' or 'npy' or 'mat' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'

- if WeightFormat != 'd' and WeightFormat != 'f' and WeightFormat != 'raw':
+ if WeightsFormat != 'd' and WeightsFormat != 'f' and WeightsFormat != 'raw':
  print(Fore.RED + "ERROR111: Weight Format Type must be 'd' or 'f' or 'raw' from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'

@@ -747,8 +727,8 @@ def SavePLAN(ModelName,
  'TEST ACCURACY': TestAcc,
  'SAVE DATE': datetime.now(),
  'WEIGHTS TYPE': WeightsType,
- 'WEIGHTS FORMAT': WeightFormat,
- 'SAVE PATH': SavePath
+ 'WEIGHTS FORMAT': WeightsFormat,
+ 'SAVE PATH': ModelPath
  }
  try:

@@ -756,15 +736,15 @@ def SavePLAN(ModelName,

  if LogType == 'csv':

- df.to_csv(SavePath + ModelName + '.csv', sep='\t', index=False)
+ df.to_csv(ModelPath + ModelName + '.csv', sep='\t', index=False)

  elif LogType == 'txt':

- df.to_csv(SavePath + ModelName + '.txt', sep='\t', index=False)
+ df.to_csv(ModelPath + ModelName + '.txt', sep='\t', index=False)

  elif LogType == 'hdf5':

- df.to_hdf(SavePath + ModelName + '.h5', key='data', mode='w')
+ df.to_hdf(ModelPath + ModelName + '.h5', key='data', mode='w')

  except:

@@ -772,61 +752,61 @@ def SavePLAN(ModelName,
  return 'e'
  try:

- if WeightsType == 'txt' and WeightFormat == 'd':
+ if WeightsType == 'txt' and WeightsFormat == 'd':

  for i, w in enumerate(W):
- np.savetxt(SavePath + ModelName + str(i+1) + 'w.txt' , w, fmt='%d')
+ np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w, fmt='%d')

- if WeightsType == 'txt' and WeightFormat == 'f':
+ if WeightsType == 'txt' and WeightsFormat == 'f':

  for i, w in enumerate(W):
- np.savetxt(SavePath + ModelName + str(i+1) + 'w.txt' , w, fmt='%f')
+ np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w, fmt='%f')

- if WeightsType == 'txt' and WeightFormat == 'raw':
+ if WeightsType == 'txt' and WeightsFormat == 'raw':

  for i, w in enumerate(W):
- np.savetxt(SavePath + ModelName + str(i+1) + 'w.txt' , w)
+ np.savetxt(ModelPath + ModelName + str(i+1) + 'w.txt' , w)


  ###


- if WeightsType == 'npy' and WeightFormat == 'd':
+ if WeightsType == 'npy' and WeightsFormat == 'd':

  for i, w in enumerate(W):
- np.save(SavePath + ModelName + str(i+1) + 'w.npy', w.astype(int))
+ np.save(ModelPath + ModelName + str(i+1) + 'w.npy', w.astype(int))

- if WeightsType == 'npy' and WeightFormat == 'f':
+ if WeightsType == 'npy' and WeightsFormat == 'f':

  for i, w in enumerate(W):
- np.save(SavePath + ModelName + str(i+1) + 'w.npy' , w, w.astype(float))
+ np.save(ModelPath + ModelName + str(i+1) + 'w.npy' , w, w.astype(float))

- if WeightsType == 'npy' and WeightFormat == 'raw':
+ if WeightsType == 'npy' and WeightsFormat == 'raw':

  for i, w in enumerate(W):
- np.save(SavePath + ModelName + str(i+1) + 'w.npy' , w)
+ np.save(ModelPath + ModelName + str(i+1) + 'w.npy' , w)


  ###


- if WeightsType == 'mat' and WeightFormat == 'd':
+ if WeightsType == 'mat' and WeightsFormat == 'd':

  for i, w in enumerate(W):
  w = {'w': w.astype(int)}
- io.savemat(SavePath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)

- if WeightsType == 'mat' and WeightFormat == 'f':
+ if WeightsType == 'mat' and WeightsFormat == 'f':

  for i, w in enumerate(W):
  w = {'w': w.astype(float)}
- io.savemat(SavePath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)

- if WeightsType == 'mat' and WeightFormat == 'raw':
+ if WeightsType == 'mat' and WeightsFormat == 'raw':

  for i, w in enumerate(W):
  w = {'w': w}
- io.savemat(SavePath + ModelName + str(i+1) + 'w.mat', w)
+ io.savemat(ModelPath + ModelName + str(i+1) + 'w.mat', w)

  except:

@@ -884,7 +864,7 @@ def LoadPLAN(ModelName,
  ClassCount = int(df['CLASS COUNT'].iloc[0])
  ThresholdSigns = df['THRESHOLD SIGNS'].tolist()
  ThresholdValues = df['THRESHOLD VALUES'].tolist()
- Normalization = df['NORMALIZATION'].tolist()
+ Normalizations = df['NORMALIZATION'].tolist()
  Activations = df['ACTIVATIONS'].tolist()
  NeuronCount = int(df['NEURON COUNT'].iloc[0])
  SynapseCount = int(df['SYNAPSE COUNT'].iloc[0])
@@ -892,12 +872,12 @@ def LoadPLAN(ModelName,
  ModelType = str(df['MODEL TYPE'].iloc[0])
  WeightType = str(df['WEIGHTS TYPE'].iloc[0])
  WeightFormat = str(df['WEIGHTS FORMAT'].iloc[0])
- SavePath = str(df['SAVE PATH'].iloc[0])
+ ModelPath = str(df['MODEL PATH'].iloc[0])

  W = [0] * LayerCount

  if WeightType == 'txt':
- for i in range(LayerCount):
+ for i in range(LayerCount):
  W[i] = np.loadtxt(LoadPath + ModelName + str(i+1) + 'w.txt')
  elif WeightType == 'npy':
  for i in range(LayerCount):
@@ -908,7 +888,7 @@ def LoadPLAN(ModelName,
  else:
  raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
  print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
- return W,Layers,ThresholdSigns,ThresholdValues,Normalization,Activations,df
+ return W,Layers,ThresholdSigns,ThresholdValues,Normalizations,Activations,df

  def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
  infoPredictFromDİscPLAN = """
@@ -923,7 +903,7 @@ def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
  Returns:
  ndarray: Output from the model.
  """
- W,Layers,ThresholdSigns,ThresholdValues,Normalization,Activations = LoadPLAN(ModelName,ModelPath,
+ W,Layers,ThresholdSigns,ThresholdValues,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
  LogType)[0:6]
  Wc = [0] * len(W)
  for i, w in enumerate(W):
@@ -1048,4 +1028,26 @@ def AutoBalancer(TrainInputs, TrainLabels, ClassCount):
  print(Fore.RED + "ERROR: Inputs and labels must be same length check parameters" + infoAutoBalancer)
  return 'e'

- return BalancedInputs, BalancedLabels
+ return BalancedInputs, BalancedLabels
+
+
+ def GetWeights():
+
+ return[0]
+
+ def GetDf():
+
+ return[6]
+
+ def GetPreds():
+
+ return[1]
+
+ def GetAcc():
+
+ return[2]
+
+ def GetAllFromLoad():
+
+ return[0],[1],[2],[3],[4],[5]
+
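Editorial usage note (not part of the diff): the new Get* helpers appear to encode positions inside the tuples returned by TrainPLAN, TestPLAN and LoadPLAN (indices 0-2 for weights, predictions and accuracy; index 6 for LoadPLAN's DataFrame). As written each helper returns a one-element list, so the usable integer index is its element [0]. A self-contained sketch of that convention, with placeholder tuples standing in for real TrainPLAN/LoadPLAN output:

    # Helper bodies copied from the diff above; the result tuples are placeholders.
    def GetWeights():
        return [0]

    def GetPreds():
        return [1]

    def GetAcc():
        return [2]

    def GetDf():
        return [6]

    TrainResult = (["W1", "W2"], [0, 1, 1], 0.92)  # stands in for (TrainedWs, TrainPredictions, Acc)
    LoadResult = ("W", "Layers", "Signs", "Values", "Norms", "Acts", "df")  # LoadPLAN's 7 elements

    TrainedWs = TrainResult[GetWeights()[0]]
    TrainAcc = TrainResult[GetAcc()[0]]
    df = LoadResult[GetDf()[0]]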
@@ -0,0 +1,7 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 1.2.1
+ Summary: Advanced python deep learning library.UPDATED TO 1.2.1 NEW FEATURES: GetWeights() func for TrainPLAN, TestPLAN and LoadPLAN, GetPreds() and GetAcc() funcs for TrainPLAN and TestPLAN, GetAllFromLoad() func for LoadPLAN, GetDf() for LoadPLAN (Documentation in desc. Examples in GİTHUB: HCB06)
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,pruning learning artficial neural networks
@@ -0,0 +1,15 @@
+ from setuptools import setup, find_packages
+
+ # Setting Up
+
+ setup(
+
+ name = "pyerualjetwork",
+ version = "1.2.1",
+ author = "Hasan Can Beydili",
+ author_email = "tchasancan@gmail.com",
+ description= "Advanced python deep learning library.UPDATED TO 1.2.1 NEW FEATURES: GetWeights() func for TrainPLAN, TestPLAN and LoadPLAN, GetPreds() and GetAcc() funcs for TrainPLAN and TestPLAN, GetAllFromLoad() func for LoadPLAN, GetDf() for LoadPLAN (Documentation in desc. Examples in GİTHUB: HCB06)",
+ packages = find_packages(),
+ keywords = ["model evaluation", "classifcation", 'pruning learning artficial neural networks']
+
+ )
@@ -1,7 +0,0 @@
- Metadata-Version: 2.1
- Name: pyerualjetwork
- Version: 1.1.9
- Summary: Advanced python deep learning library.(More document coming soon..)
- Author: Hasan Can Beydili
- Author-email: tchasancan@gmail.com
- Keywords: model evaluation,classifcation,divided pruning neural networks
@@ -1,7 +0,0 @@
- Metadata-Version: 2.1
- Name: pyerualjetwork
- Version: 1.1.9
- Summary: Advanced python deep learning library.(More document coming soon..)
- Author: Hasan Can Beydili
- Author-email: tchasancan@gmail.com
- Keywords: model evaluation,classifcation,divided pruning neural networks
@@ -1,15 +0,0 @@
- from setuptools import setup, find_packages
-
- # Setting Up
-
- setup(
-
- name = "pyerualjetwork",
- version = "1.1.9",
- author = "Hasan Can Beydili",
- author_email = "tchasancan@gmail.com",
- description= "Advanced python deep learning library.(More document coming soon..)",
- packages = find_packages(),
- keywords = ["model evaluation", "classifcation", 'divided pruning neural networks']
-
- )