pyerualjetwork 1.2.6__py3-none-any.whl → 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plan/plan.py CHANGED
@@ -12,8 +12,8 @@ def TrainPLAN(
  ClassCount: int,
  Layers: List[str],
  Neurons: List[Union[int, float]],
- ThresholdSigns: List[str],
- ThresholdValues: List[Union[int, float]],
+ MembranThresholds: List[str],
+ MembranPotentials: List[Union[int, float]],
  Normalizations: List[str],
  Activations: List[str]
  ) -> str:
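The hunk above is the user-facing part of this release: TrainPLAN's ThresholdSigns/ThresholdValues keyword arguments are renamed to MembranThresholds/MembranPotentials. A minimal caller sketch for 1.2.8, assuming the module is imported as "from plan import plan" and that TrainInputs/TrainLabels are the leading parameters (they are referenced later in this file); the data, layer sizes, potentials and activation strings are illustrative placeholders, not values taken from the package, while the '<' and '==' signs follow the suggestion printed by ERROR104 further down:

import numpy as np
from plan import plan  # assumed import path; plan/plan.py is the module changed in this diff

x_train = [np.random.rand(8) for _ in range(4)]   # toy inputs, 8 features each
y_train = [np.eye(3)[i % 3] for i in range(4)]    # one-hot labels for 3 classes

result = plan.TrainPLAN(
    TrainInputs=x_train,
    TrainLabels=y_train,
    ClassCount=3,
    Layers=['fex', 'cat'],
    Neurons=[8, 3],                  # last entry must equal ClassCount (see ERROR108 below)
    MembranThresholds=['<', '=='],   # 1.2.6 name: ThresholdSigns
    MembranPotentials=[0.5, 0.001],  # 1.2.6 name: ThresholdValues
    Normalizations=['y', 'n'],
    Activations=['none', 'none'],    # valid activation strings are not shown in this diff
)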
@@ -27,8 +27,8 @@ def TrainPLAN(
  ClassCount (int): Number of classes.
  Layers (list[str]): List of layer names. (options: 'fex' (Feature Extraction), 'cat' (Catalyser))
  Neurons (list[num]): List of neuron counts for each layer.
- ThresholdSigns (list[str]): List of threshold signs.
- ThresholdValues (list[num]): List of threshold values.
+ MembranThresholds (list[str]): List of MembranThresholds.
+ MembranPotentials (list[num]): List of MembranPotentials.
  Normalizations (List[str]): Whether normalization will be performed at indexed layers ("y" or "n").
  Activations (list[str]): List of activation functions.
 
@@ -42,43 +42,43 @@ def TrainPLAN(
  print(Fore.RED + "ERROR108: Last layer of neuron count must be equal class count. from: TrainPLAN",infoPLAN)
  return 'e'
 
- if len(Normalizations) != len(ThresholdValues):
+ if len(Normalizations) != len(MembranPotentials):
 
- print(Fore.RED + "ERROR307: Normalization list length must be equal to length of ThresholdSigns List,ThresholdValues List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
+ print(Fore.RED + "ERROR307: Normalization list length must be equal to length of MembranThresholds List,MembranPotentials List,Layers List,Neurons List. from: TrainPLAN",infoPLAN)
  return 'e'
 
  if len(TrainInputs) != len(TrainLabels):
  print(Fore.RED + "ERROR301: TrainInputs list and TrainLabels list must be same length.",infoPLAN)
  return 'e'
 
- for i, Value in enumerate(ThresholdValues):
+ for i, Value in enumerate(MembranPotentials):
 
  if Normalizations[i] != 'y' and Normalizations[i] != 'n':
  print(Fore.RED + "ERROR105: Normalization list must be 'y' or 'n'.",infoPLAN)
  return 'e'
 
- if ThresholdSigns[i] == 'none':
+ if MembranThresholds[i] == 'none':
  print(Fore.MAGENTA + "WARNING102: We are advise to do not put 'none' Threshold sign. But some cases improves performance of the model from: TrainPLAN",infoPLAN + Style.RESET_ALL)
  time.sleep(3)
 
  if isinstance(Value, str):
- print(Fore.RED + "ERROR201: Threshold values must be numeric. from: TrainPLAN")
+ print(Fore.RED + "ERROR201: MEMBRAN POTENTIALS must be numeric. from: TrainPLAN")
  return 'e'
 
  if isinstance(Neurons[i], str):
  print(Fore.RED + "ERROR202: Neurons list must be numeric.")
  return 'e'
 
- if len(ThresholdSigns) != len(ThresholdValues):
- print(Fore.RED + "ERROR302: Threshold signs list and Threshold Values list must be same length. from: TrainPLAN",infoPLAN)
+ if len(MembranThresholds) != len(MembranPotentials):
+ print(Fore.RED + "ERROR302: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
  return 'e'
 
  if len(Layers) != len(Neurons):
  print(Fore.RED + "ERROR303: Layers list and Neurons list must same length. from: TrainPLAN",infoPLAN)
  return 'e'
 
- if len(ThresholdValues) != len(Layers) or len(ThresholdSigns) != len(Layers):
- print(Fore.RED + "ERROR306: Threshold Values and Threshold Signs lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
+ if len(MembranPotentials) != len(Layers) or len(MembranThresholds) != len(Layers):
+ print(Fore.RED + "ERROR306: MEMBRAN POTENTIALS and MEMBRAN THRESHOLDS lists length must be same Layers list length. from: TrainPLAN",infoPLAN)
  return 'e'
 
 
@@ -105,14 +105,14 @@ def TrainPLAN(
  print(Fore.RED + "ERROR107: Layers list must be 'fex'(Feature Extraction Layer) or 'cat' (Catalyser Layer). from: TrainPLAN",infoPLAN)
  return 'e'
 
- if len(ThresholdSigns) != len(ThresholdValues):
- print(Fore.RED + "ERROR305: Threshold signs list and Threshold values list must be same length. from: TrainPLAN",infoPLAN)
+ if len(MembranThresholds) != len(MembranPotentials):
+ print(Fore.RED + "ERROR305: MEMBRAN THRESHOLDS list and MEMBRAN POTENTIALS list must be same length. from: TrainPLAN",infoPLAN)
  return 'e'
 
 
- for i, Sign in enumerate(ThresholdSigns):
+ for i, Sign in enumerate(MembranThresholds):
  if Sign != '>' and Sign != '<' and Sign != '==' and Sign != '!=' and Sign != 'none':
- print(Fore.RED + "ERROR104: Threshold signs must be '>' or '<' or '==' or '!='. or 'none' from: TrainPLAN",infoPLAN)
+ print(Fore.RED + "ERROR104: MEMBRAN THRESHOLDS must be '>' or '<' or '==' or '!='. or 'none' WE SUGGEST '<' FOR FEX LAYER AND '==' FOR CAT LAYER (Your data, your hyperparameter) from: TrainPLAN",infoPLAN)
  return 'e'
 
  if Layers[i] == 'fex' and Sign == 'none':
@@ -181,9 +181,9 @@ def TrainPLAN(
  NeuralLayer = Softmax(NeuralLayer)
 
  if Layer == 'fex':
- NeuralLayer,W[Lindex] = Fex(NeuralLayer, W[Lindex], ThresholdSigns[Lindex], ThresholdValues[Lindex])
+ NeuralLayer,W[Lindex] = Fex(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex])
  elif Layer == 'cat':
- NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], ThresholdSigns[Lindex], ThresholdValues[Lindex],1)
+ NeuralLayer,W[Lindex] = Cat(NeuralLayer, W[Lindex], MembranThresholds[Lindex], MembranPotentials[Lindex],1)
 
  RealOutput = np.argmax(TrainLabels[index])
  PredictedOutput = np.argmax(NeuralLayer)
@@ -391,8 +391,8 @@ def SynapticDividing(
  def Fex(
  Input, # list[num]: Input data.
  w, # list[list[num]]: Weight matrix of the neural network.
- ThresholdsSign, # str: Sign for threshold comparison ('<', '>', '==', '!=').
- ThresholdValue # num: Threshold value for comparison.
+ MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
+ MembranPotential # num: Threshold value for comparison.
  ) -> tuple:
  """
  Applies feature extraction process to the input data using synaptic pruning.
@@ -400,21 +400,21 @@ def Fex(
  Args:
  Input (list[num]): Input data.
  w (list[list[num]]): Weight matrix of the neural network.
- ThresholdsSign (str): Sign for threshold comparison ('<', '>', '==', '!=').
- ThresholdValue (num): Threshold value for comparison.
+ MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+ MembranPotential (num): Threshold value for comparison.
 
  Returns:
  tuple: A tuple (vector) containing the neural layer result and the updated weight matrix.
  """
 
- if ThresholdsSign == '<':
- PruneIndex = np.where(Input < ThresholdValue)
- elif ThresholdsSign == '>':
- PruneIndex = np.where(Input > ThresholdValue)
- elif ThresholdsSign == '==':
- PruneIndex = np.where(Input == ThresholdValue)
- elif ThresholdsSign == '!=':
- PruneIndex = np.where(Input != ThresholdValue)
+ if MembranThreshold == '<':
+ PruneIndex = np.where(Input < MembranPotential)
+ elif MembranThreshold == '>':
+ PruneIndex = np.where(Input > MembranPotential)
+ elif MembranThreshold == '==':
+ PruneIndex = np.where(Input == MembranPotential)
+ elif MembranThreshold == '!=':
+ PruneIndex = np.where(Input != MembranPotential)
 
  w = SynapticPruning(w, PruneIndex, 'col', 0, 0)
 
@@ -424,8 +424,8 @@ def Fex(
  def Cat(
  Input, # list[num]: Input data.
  w, # list[list[num]]: Weight matrix of the neural network.
- ThresholdsSign, # str: Sign for threshold comparison ('<', '>', '==', '!=').
- ThresholdValue, # num: Threshold value for comparison.
+ MembranThreshold, # str: Sign for threshold comparison ('<', '>', '==', '!=').
+ MembranPotential, # num: Threshold value for comparison.
  isTrain # int: Flag indicating if the function is called during training (1 for training, 0 otherwise).
  ) -> tuple:
  """
@@ -434,23 +434,23 @@ def Cat(
  Args:
  Input (list[num]): Input data.
  w (list[list[num]]): Weight matrix of the neural network.
- ThresholdsSign (str): Sign for threshold comparison ('<', '>', '==', '!=').
- ThresholdValue (num): Threshold value for comparison.
+ MembranThreshold (str): Sign for threshold comparison ('<', '>', '==', '!=').
+ MembranPotential (num): Threshold value for comparison.
  isTrain (int): Flag indicating if the function is called during training (1 for training, 0 otherwise).
 
  Returns:
  tuple: A tuple containing the neural layer (vector) result and the possibly updated weight matrix.
  """
 
- if ThresholdsSign == '<':
- PruneIndex = np.where(Input < ThresholdValue)
- elif ThresholdsSign == '>':
- PruneIndex = np.where(Input > ThresholdValue)
- elif ThresholdsSign == '==':
- PruneIndex = np.where(Input == ThresholdValue)
- elif ThresholdsSign == '!=':
- PruneIndex = np.where(Input != ThresholdValue)
- if isTrain == 1 and ThresholdsSign != 'none':
+ if MembranThreshold == '<':
+ PruneIndex = np.where(Input < MembranPotential)
+ elif MembranThreshold == '>':
+ PruneIndex = np.where(Input > MembranPotential)
+ elif MembranThreshold == '==':
+ PruneIndex = np.where(Input == MembranPotential)
+ elif MembranThreshold == '!=':
+ PruneIndex = np.where(Input != MembranPotential)
+ if isTrain == 1 and MembranThreshold != 'none':
 
  w = SynapticPruning(w, PruneIndex, 'col', 0, 0)
 
@@ -534,8 +534,8 @@ def TestPLAN(
  TestInputs, # list[list[num]]: Test input data.
  TestLabels, # list[num]: Test labels.
  Layers, # list[str]: List of layer names.
- ThresholdSigns, # list[str]: List of threshold signs for each layer.
- ThresholdValues, # list[num]: List of threshold values for each layer.
+ MembranThresholds, # list[str]: List of MEMBRAN THRESHOLDS for each layer.
+ MembranPotentials, # list[num]: List of MEMBRAN POTENTIALS for each layer.
  Normalizations, # str: Whether normalization will be performed ("y" or "n").
  Activations, # str: Activation function list for the neural network.
  W # list[list[num]]: Weight matrix of the neural network.
@@ -547,8 +547,8 @@ def TestPLAN(
  TestInputs (list[list[num]]): Test input data.
  TestLabels (list[num]): Test labels.
  Layers (list[str]): List of layer names.
- ThresholdSigns (list[str]): List of threshold signs for each layer.
- ThresholdValues (list[num]): List of threshold values for each layer.
+ MembranThresholds (list[str]): List of MEMBRAN THRESHOLDS for each layer.
+ MembranPotentials (list[num]): List of MEMBRAN POTENTIALS for each layer.
  Normalizatios list([str]): Whether normalization will be performed ("yes" or "no").
  Activation (str): Activation function for the neural network.
  W (list[list[num]]): Weight matrix of the neural network.
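TestPLAN takes the same renamed lists as TrainPLAN plus the trained weight matrix. A hedged call sketch, reusing the illustrative hyperparameters from the TrainPLAN sketch above; the three-value return follows the "return W,TestPredictions,Acc" line further down, and TrainedW stands for weights obtained from TrainPLAN or LoadPLAN (not defined in this sketch):

import numpy as np
from plan import plan  # assumed import path

x_test = [np.random.rand(8) for _ in range(2)]   # illustrative test data
y_test = [np.eye(3)[0], np.eye(3)[1]]

W, TestPredictions, Acc = plan.TestPLAN(
    TestInputs=x_test,
    TestLabels=y_test,
    Layers=['fex', 'cat'],
    MembranThresholds=['<', '=='],   # 1.2.6 name: ThresholdSigns
    MembranPotentials=[0.5, 0.001],  # 1.2.6 name: ThresholdValues
    Normalizations=['y', 'n'],
    Activations=['none', 'none'],
    W=TrainedW,                      # weights from TrainPLAN or LoadPLAN
)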
@@ -585,9 +585,9 @@ def TestPLAN(
  NeuralLayer = Softmax(NeuralLayer)
 
  if Layers[index] == 'fex':
- NeuralLayer,useless = Fex(NeuralLayer, W[index], ThresholdSigns[index], ThresholdValues[index])
+ NeuralLayer,useless = Fex(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index])
  if Layers[index] == 'cat':
- NeuralLayer,useless = Cat(NeuralLayer, W[index], ThresholdSigns[index], ThresholdValues[index],0)
+ NeuralLayer,useless = Cat(NeuralLayer, W[index], MembranThresholds[index], MembranPotentials[index],0)
  for i, w in enumerate(Wc):
  W[i] = np.copy(w)
  RealOutput = np.argmax(TestLabels[inpIndex])
@@ -640,7 +640,7 @@ def TestPLAN(
 
  except:
 
- print(Fore.RED + "ERROR: Testing model parameters like 'Layers' 'ThresholdCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Testing model parameters like 'Layers' 'MembranCounts' must be same as trained model. Check parameters. Are you sure weights are loaded ? from: TestPLAN" + infoTestModel + Style.RESET_ALL)
  return 'e'
 
  return W,TestPredictions,Acc
@@ -649,8 +649,8 @@ def SavePLAN(ModelName,
  ModelType,
  Layers,
  ClassCount,
- ThresholdSigns,
- ThresholdValues,
+ MembranThresholds,
+ MembranPotentials,
  Normalizations,
  Activations,
  TestAcc,
@@ -669,8 +669,8 @@ def SavePLAN(ModelName,
  ModelType (str): Type of the model.(options: PLAN)
  Layers (list): List containing 'fex' and 'cat' layers.
  ClassCount (int): Number of classes.
- ThresholdSigns (list): List containing threshold signs.
- ThresholdValues (list): List containing threshold values.
+ MembranThresholds (list): List containing MEMBRAN THRESHOLDS.
+ MembranPotentials (list): List containing MEMBRAN POTENTIALS.
  DoNormalization (str): is that normalized data ? 'y' or 'n'.
  Activations (list): List containing activation functions for each layer.
  TestAcc (float): Test accuracy of the model.
@@ -718,8 +718,8 @@ def SavePLAN(ModelName,
  'LAYERS': Layers,
  'LAYER COUNT': len(Layers),
  'CLASS COUNT': ClassCount,
- 'THRESHOLD SIGNS': ThresholdSigns,
- 'THRESHOLD VALUES': ThresholdValues,
+ 'MEMBRAN THRESHOLDS': MembranThresholds,
+ 'MEMBRAN POTENTIALS': MembranPotentials,
  'NORMALIZATION': Normalizations,
  'ACTIVATIONS': Activations,
  'NEURON COUNT': NeuronCount,
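The model log written by SavePLAN therefore carries the renamed column names, which is exactly what the LoadPLAN hunks further down read back. A minimal sketch of that schema with pandas, limited to the keys shown in this hunk and with illustrative values:

import pandas as pd

log = {
    'LAYERS': ['fex', 'cat'],
    'LAYER COUNT': [2],
    'CLASS COUNT': [3],
    'MEMBRAN THRESHOLDS': ['<', '=='],   # 1.2.6 column: 'THRESHOLD SIGNS'
    'MEMBRAN POTENTIALS': [0.5, 0.001],  # 1.2.6 column: 'THRESHOLD VALUES'
    'NORMALIZATION': ['y', 'n'],
    'ACTIVATIONS': ['none', 'none'],
}
df = pd.DataFrame({k: pd.Series(v) for k, v in log.items()})
print(df['MEMBRAN THRESHOLDS'].tolist())  # ['<', '=='], i.e. what LoadPLAN reads back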
@@ -748,7 +748,7 @@ def SavePLAN(ModelName,
 
  except:
 
- print(Fore.RED + "ERROR: Model log not saved. Check the log parameters from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
+ print(Fore.RED + "ERROR: Model log not saved probably ModelPath incorrect. Check the log parameters from: SavePLAN" + infoSavePLAN + Style.RESET_ALL)
  return 'e'
  try:
 
@@ -835,7 +835,7 @@ def LoadPLAN(ModelName,
  LogType (str): Type of log to load (options: 'csv', 'txt', 'hdf5').
 
  Returns:
- lists: W(list[num]), Layers, ThresholdSigns, ThresholdValues, Normalization,Activations
+ lists: W(list[num]), Layers, MembranThresholds, MembranPotentials, Normalization,Activations
  """
  pass
 
@@ -862,8 +862,8 @@ def LoadPLAN(ModelName,
  Layers = df['LAYERS'].tolist()
  LayerCount = int(df['LAYER COUNT'].iloc[0])
  ClassCount = int(df['CLASS COUNT'].iloc[0])
- ThresholdSigns = df['THRESHOLD SIGNS'].tolist()
- ThresholdValues = df['THRESHOLD VALUES'].tolist()
+ MembranThresholds = df['MEMBRAN THRESHOLDS'].tolist()
+ MembranPotentials = df['MEMBRAN POTENTIALS'].tolist()
  Normalizations = df['NORMALIZATION'].tolist()
  Activations = df['ACTIVATIONS'].tolist()
  NeuronCount = int(df['NEURON COUNT'].iloc[0])
@@ -888,7 +888,7 @@ def LoadPLAN(ModelName,
  else:
  raise ValueError(Fore.RED + "Incorrect weight type value. Value must be 'txt', 'npy' or 'mat' from: LoadPLAN." + infoLoadPLAN + Style.RESET_ALL)
  print(Fore.GREEN + "Model loaded succesfully" + Style.RESET_ALL)
- return W,Layers,ThresholdSigns,ThresholdValues,Normalizations,Activations,df
+ return W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,df
 
  def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
  infoPredictFromDİscPLAN = """
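Callers that unpack the LoadPLAN result positionally keep working, but code that names the pieces should adopt the new names. A sketch with placeholder model name, path and log type; the seven-item return follows the return statement above, and the ModelName/ModelPath/LogType argument order matches the LoadPLAN call in the PredictFromDiscPLAN hunk just below:

from plan import plan  # assumed import path

W, Layers, MembranThresholds, MembranPotentials, Normalizations, Activations, df = plan.LoadPLAN(
    'ExampleModel',   # ModelName (placeholder)
    'SavedModels/',   # ModelPath (placeholder)
    'csv')            # LogType: 'csv', 'txt' or 'hdf5' per the docstring above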
@@ -903,7 +903,7 @@ def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
  Returns:
  ndarray: Output from the model.
  """
- W,Layers,ThresholdSigns,ThresholdValues,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
+ W,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations = LoadPLAN(ModelName,ModelPath,
  LogType)[0:6]
  Wc = [0] * len(W)
  for i, w in enumerate(W):
@@ -924,12 +924,12 @@ def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
 
  if Layers[index] == 'fex':
  NeuralLayer,useless = Fex(NeuralLayer, W[index],
- ThresholdSigns[index],
- ThresholdValues[index])
+ MembranThresholds[index],
+ MembranPotentials[index])
  if Layers[index] == 'cat':
  NeuralLayer,useless = Cat(NeuralLayer, W[index],
- ThresholdSigns[index],
- ThresholdValues[index],
+ MembranThresholds[index],
+ MembranPotentials[index],
  0)
  except:
  print(Fore.RED + "ERROR: The input was probably entered incorrectly. from: PredictFromDiscPLAN" + infoPredictFromDİscPLAN + Style.RESET_ALL)
@@ -939,7 +939,7 @@ def PredictFromDiscPLAN(Input,ModelName,ModelPath,LogType):
  return NeuralLayer
 
 
- def PredictFromRamPLAN(Input,Layers,ThresholdSigns,ThresholdValues,Normalizations,Activations,W):
+ def PredictFromRamPLAN(Input,Layers,MembranThresholds,MembranPotentials,Normalizations,Activations,W):
  infoPredictFromRamPLAN = """
  Function to make a prediction using a pruning learning artificial neural network (PLAN)
  from weights and parameters stored in memory.
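PredictFromRamPLAN gets the same rename in its signature. A hypothetical single-sample call with the renamed keyword arguments, reusing the illustrative hyperparameters from the earlier sketches; W stands for in-memory weights from TrainPLAN, TestPLAN or LoadPLAN and is not defined in this sketch:

import numpy as np
from plan import plan  # assumed import path

Output = plan.PredictFromRamPLAN(
    Input=np.random.rand(8),
    Layers=['fex', 'cat'],
    MembranThresholds=['<', '=='],   # 1.2.6 name: ThresholdSigns
    MembranPotentials=[0.5, 0.001],  # 1.2.6 name: ThresholdValues
    Normalizations=['y', 'n'],
    Activations=['none', 'none'],
    W=W,                             # weights already loaded in memory
)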
@@ -947,8 +947,8 @@ def PredictFromRamPLAN(Input,Layers,ThresholdSigns,ThresholdValues,Normalization
  Arguments:
  Input (list or ndarray): Input data for the model (single vector or single matrix).
  Layers (list): Number and types of layers.
- ThresholdSigns (list): Threshold signs.
- ThresholdValues (list): Threshold values.
+ MembranThresholds (list): MEMBRAN THRESHOLDS.
+ MembranPotentials (list): MEMBRAN POTENTIALS.
  DoNormalization (str): Whether to normalize ('y' or 'n').
  Activations (list): Activation functions for each layer.
  W (list of ndarrays): Weights of the model.
@@ -976,12 +976,12 @@ def PredictFromRamPLAN(Input,Layers,ThresholdSigns,ThresholdValues,Normalization
 
  if Layers[index] == 'fex':
  NeuralLayer,useless = Fex(NeuralLayer, W[index],
- ThresholdSigns[index],
- ThresholdValues[index])
+ MembranThresholds[index],
+ MembranPotentials[index])
  if Layers[index] == 'cat':
  NeuralLayer,useless = Cat(NeuralLayer, W[index],
- ThresholdSigns[index],
- ThresholdValues[index],0)
+ MembranThresholds[index],
+ MembranPotentials[index],0)
  except:
  print(Fore.RED + "ERROR: Unexpected input or wrong model parameters from: PredictFromRamPLAN." + infoPredictFromRamPLAN + Style.RESET_ALL)
  return 'e'
pyerualjetwork-1.2.8.dist-info/METADATA ADDED
@@ -0,0 +1,8 @@
+ Metadata-Version: 2.1
+ Name: pyerualjetwork
+ Version: 1.2.8
+ Summary: Advanced python deep learning library. 'ThresholdSigns' changed to 'MembranThresholds' and 'ThresholdValues' changed to 'MembranPotentials'. (Documentation in desc. Examples in GİTHUB: https://github.com/HCB06/PyerualJetwork)
+ Author: Hasan Can Beydili
+ Author-email: tchasancan@gmail.com
+ Keywords: model evaluation,classifcation,pruning learning artficial neural networks
+ 
pyerualjetwork-1.2.8.dist-info/RECORD ADDED
@@ -0,0 +1,6 @@
+ plan/__init__.py,sha256=4trwhtGaeJd2tncziQAKrPT-Jjz6Q8nBG0n3SlyH1a4,352
+ plan/plan.py,sha256=CD3vwTbQznu4-kicpjGej2nki2X9OIjGwq_29SzjNDM,40457
+ pyerualjetwork-1.2.8.dist-info/METADATA,sha256=_XxnIpTXEqryO_m53tkbbxuInpQjokbs04WOaT878vY,448
+ pyerualjetwork-1.2.8.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+ pyerualjetwork-1.2.8.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
+ pyerualjetwork-1.2.8.dist-info/RECORD,,
pyerualjetwork-1.2.6.dist-info/WHEEL → pyerualjetwork-1.2.8.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.41.2)
+ Generator: bdist_wheel (0.38.4)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
pyerualjetwork-1.2.6.dist-info/METADATA REMOVED
@@ -1,8 +0,0 @@
- Metadata-Version: 2.1
- Name: pyerualjetwork
- Version: 1.2.6
- Summary: Advanced python deep learning library.UPDATED TO 1.2.2 NEW FEATURES: GetWeights() func for TrainPLAN, TestPLAN and LoadPLAN, GetPreds() and GetAcc() funcs for TrainPLAN and TestPLAN, GetDf() func for LoadPLAN (Documentation in desc. Examples in GİTHUB: HCB06)
- Author: Hasan Can Beydili
- Author-email: tchasancan@gmail.com
- Keywords: model evaluation,classifcation,pruning learning artficial neural networks
- 
pyerualjetwork-1.2.6.dist-info/RECORD REMOVED
@@ -1,6 +0,0 @@
- plan/__init__.py,sha256=4trwhtGaeJd2tncziQAKrPT-Jjz6Q8nBG0n3SlyH1a4,352
- plan/plan.py,sha256=MJFRiMUoHYdwQMeqaH-FdUildxV0OQP3Zlo1OozUt3A,40107
- pyerualjetwork-1.2.6.dist-info/METADATA,sha256=tzQViDtGao_3Cp1t-WT7K77dauJMOM_v4URMnBEpk3g,482
- pyerualjetwork-1.2.6.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92
- pyerualjetwork-1.2.6.dist-info/top_level.txt,sha256=G0Al3HuNJ88434XneyDtRKAIUaLCizOFYFYNhd7e2OM,5
- pyerualjetwork-1.2.6.dist-info/RECORD,,