dragon-ml-toolbox 5.0.0__tar.gz → 5.2.0__tar.gz

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective registries.


Files changed (38)
  1. {dragon_ml_toolbox-5.0.0/dragon_ml_toolbox.egg-info → dragon_ml_toolbox-5.2.0}/PKG-INFO +4 -3
  2. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/README.md +3 -2
  3. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0/dragon_ml_toolbox.egg-info}/PKG-INFO +4 -3
  4. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/dragon_ml_toolbox.egg-info/SOURCES.txt +1 -1
  5. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_datasetmaster.py +91 -1
  6. dragon_ml_toolbox-5.2.0/ml_tools/ML_models.py +134 -0
  7. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/pyproject.toml +1 -1
  8. dragon_ml_toolbox-5.0.0/ml_tools/_pytorch_models.py +0 -239
  9. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/LICENSE +0 -0
  10. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/LICENSE-THIRD-PARTY.md +0 -0
  11. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/dragon_ml_toolbox.egg-info/dependency_links.txt +0 -0
  12. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/dragon_ml_toolbox.egg-info/requires.txt +0 -0
  13. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/dragon_ml_toolbox.egg-info/top_level.txt +0 -0
  14. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ETL_engineering.py +0 -0
  15. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/GUI_tools.py +0 -0
  16. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/MICE_imputation.py +0 -0
  17. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_callbacks.py +0 -0
  18. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_evaluation.py +0 -0
  19. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_inference.py +0 -0
  20. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_optimization.py +0 -0
  21. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ML_trainer.py +0 -0
  22. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/PSO_optimization.py +0 -0
  23. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/RNN_forecast.py +0 -0
  24. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/SQL.py +0 -0
  25. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/VIF_factor.py +0 -0
  26. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/__init__.py +0 -0
  27. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/_logger.py +0 -0
  28. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/_script_info.py +0 -0
  29. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/custom_logger.py +0 -0
  30. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/data_exploration.py +0 -0
  31. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ensemble_inference.py +0 -0
  32. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/ensemble_learning.py +0 -0
  33. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/handle_excel.py +0 -0
  34. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/keys.py +0 -0
  35. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/optimization_tools.py +0 -0
  36. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/path_manager.py +0 -0
  37. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/ml_tools/utilities.py +0 -0
  38. {dragon_ml_toolbox-5.0.0 → dragon_ml_toolbox-5.2.0}/setup.cfg +0 -0

--- dragon_ml_toolbox-5.0.0/dragon_ml_toolbox.egg-info/PKG-INFO
+++ dragon_ml_toolbox-5.2.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dragon-ml-toolbox
-Version: 5.0.0
+Version: 5.2.0
 Summary: A collection of tools for data science and machine learning projects.
 Author-email: Karl Loza <luigiloza@gmail.com>
 License-Expression: MIT
@@ -141,10 +141,11 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-datasetmaster
 ensemble_learning
 ensemble_inference
 ETL_engineering
+ML_datasetmaster
+ML_models
 ML_callbacks
 ML_evaluation
 ML_trainer
@@ -268,5 +269,5 @@ After installation, import modules like this:
 
 ```python
 from ml_tools.utilities import serialize_object, deserialize_object
-from ml_tools.custom_logger import custom_logger
+from ml_tools import custom_logger
 ```

--- dragon_ml_toolbox-5.0.0/README.md
+++ dragon_ml_toolbox-5.2.0/README.md
@@ -60,10 +60,11 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-datasetmaster
 ensemble_learning
 ensemble_inference
 ETL_engineering
+ML_datasetmaster
+ML_models
 ML_callbacks
 ML_evaluation
 ML_trainer
@@ -187,5 +188,5 @@ After installation, import modules like this:
 
 ```python
 from ml_tools.utilities import serialize_object, deserialize_object
-from ml_tools.custom_logger import custom_logger
+from ml_tools import custom_logger
 ```

--- dragon_ml_toolbox-5.0.0/PKG-INFO
+++ dragon_ml_toolbox-5.2.0/dragon_ml_toolbox.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dragon-ml-toolbox
-Version: 5.0.0
+Version: 5.2.0
 Summary: A collection of tools for data science and machine learning projects.
 Author-email: Karl Loza <luigiloza@gmail.com>
 License-Expression: MIT
@@ -141,10 +141,11 @@ pip install "dragon-ml-toolbox[pytorch]"
 ```bash
 custom_logger
 data_exploration
-datasetmaster
 ensemble_learning
 ensemble_inference
 ETL_engineering
+ML_datasetmaster
+ML_models
 ML_callbacks
 ML_evaluation
 ML_trainer
@@ -268,5 +269,5 @@ After installation, import modules like this:
 
 ```python
 from ml_tools.utilities import serialize_object, deserialize_object
-from ml_tools.custom_logger import custom_logger
+from ml_tools import custom_logger
 ```

--- dragon_ml_toolbox-5.0.0/dragon_ml_toolbox.egg-info/SOURCES.txt
+++ dragon_ml_toolbox-5.2.0/dragon_ml_toolbox.egg-info/SOURCES.txt
@@ -14,6 +14,7 @@ ml_tools/ML_callbacks.py
 ml_tools/ML_datasetmaster.py
 ml_tools/ML_evaluation.py
 ml_tools/ML_inference.py
+ml_tools/ML_models.py
 ml_tools/ML_optimization.py
 ml_tools/ML_trainer.py
 ml_tools/PSO_optimization.py
@@ -22,7 +23,6 @@ ml_tools/SQL.py
 ml_tools/VIF_factor.py
 ml_tools/__init__.py
 ml_tools/_logger.py
-ml_tools/_pytorch_models.py
 ml_tools/_script_info.py
 ml_tools/custom_logger.py
 ml_tools/data_exploration.py

--- dragon_ml_toolbox-5.0.0/ml_tools/ML_datasetmaster.py
+++ dragon_ml_toolbox-5.2.0/ml_tools/ML_datasetmaster.py
@@ -21,6 +21,7 @@ from ._script_info import _script_info
 # --- public-facing API ---
 __all__ = [
     "DatasetMaker",
+    "SimpleDatasetMaker",
     "VisionDatasetMaker",
     "SequenceMaker",
     "ResizeAspectFill",
@@ -328,7 +329,7 @@ class DatasetMaker(_BaseMaker):
 
         return self.scaler.inverse_transform(data_np)
 
-    def get_datasets(self) -> Tuple[_PytorchDataset, _PytorchDataset]:
+    def get_datasets(self) -> Tuple[Dataset, Dataset]:
         """Primary method to get the final PyTorch Datasets."""
         if not self._is_split:
             raise RuntimeError("Data has not been split yet. Call .split_data() or .process() first.")
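
This signature change matters for downstream users: `get_datasets()` previously leaked the private `_PytorchDataset` class in its public annotation and now advertises the standard `torch.utils.data.Dataset` instead. A minimal sketch of what that enables for callers (the `make_loader` helper is hypothetical, not part of the package):

```python
from torch.utils.data import DataLoader, Dataset

def make_loader(ds: Dataset, batch_size: int = 32, shuffle: bool = False) -> DataLoader:
    # Typed against the public Dataset ABC, so it accepts whatever
    # DatasetMaker.get_datasets() returns without importing private names.
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle)
```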
@@ -370,6 +371,95 @@ class DatasetMaker(_BaseMaker):
         return pandas.DataFrame(full_tensor.numpy(), columns=new_columns, index=cat_df.index)
 
 
+# Streamlined DatasetMaker version
+class SimpleDatasetMaker:
+    """
+    A simplified dataset maker for pre-processed, numerical pandas DataFrames.
+
+    This class takes a DataFrame, automatically splits it into training and
+    testing sets, and converts them into PyTorch Datasets. It assumes the
+    target variable is the last column.
+
+    Args:
+        pandas_df (pandas.DataFrame): The pre-processed input DataFrame with numerical data.
+        test_size (float): The proportion of the dataset to allocate to the
+            test split.
+        random_state (int): The seed for the random number generator for
+            reproducibility.
+        id (str | None): An optional object identifier.
+    """
+    def __init__(self, pandas_df: pandas.DataFrame, test_size: float = 0.2, random_state: int = 42, id: Optional[str]=None):
+        """
+        Attributes:
+            `train_dataset` -> PyTorch Dataset
+            `test_dataset` -> PyTorch Dataset
+            `feature_names` -> list[str]
+            `target_name` -> str
+            `id` -> str | None
+        """
+
+        if not isinstance(pandas_df, pandas.DataFrame):
+            raise TypeError("Input must be a pandas.DataFrame.")
+
+        #set id
+        self._id = id
+
+        # 1. Identify features and target
+        features = pandas_df.iloc[:, :-1]
+        target = pandas_df.iloc[:, -1]
+
+        self._feature_names = features.columns.tolist()
+        self._target_name = target.name
+
+        # 2. Split the data
+        X_train, X_test, y_train, y_test = train_test_split(
+            features, target, test_size=test_size, random_state=random_state
+        )
+
+        self._X_train_shape = X_train.shape
+        self._X_test_shape = X_test.shape
+        self._y_train_shape = y_train.shape
+        self._y_test_shape = y_test.shape
+
+        # 3. Convert to PyTorch Datasets
+        self._train_ds = _PytorchDataset(X_train.values, y_train.values)
+        self._test_ds = _PytorchDataset(X_test.values, y_test.values)
+
+    @property
+    def train_dataset(self) -> Dataset:
+        """Returns the training PyTorch dataset."""
+        return self._train_ds
+
+    @property
+    def test_dataset(self) -> Dataset:
+        """Returns the testing PyTorch dataset."""
+        return self._test_ds
+
+    @property
+    def feature_names(self) -> list[str]:
+        """Returns the list of feature column names."""
+        return self._feature_names
+
+    @property
+    def target_name(self) -> str:
+        """Returns the name of the target column."""
+        return str(self._target_name)
+
+    @property
+    def id(self) -> Optional[str]:
+        """Returns the object identifier if any."""
+        return self._id
+
+    def dataframes_info(self) -> None:
+        """Prints the shape information of the split pandas DataFrames."""
+        print("--- Original DataFrame Shapes After Split ---")
+        print(f"  X_train shape: {self._X_train_shape}")
+        print(f"  y_train shape: {self._y_train_shape}\n")
+        print(f"  X_test shape: {self._X_test_shape}")
+        print(f"  y_test shape: {self._y_test_shape}")
+        print("-------------------------------------------")
+
+
 # --- VisionDatasetMaker ---
 class VisionDatasetMaker(_BaseMaker):
     """

--- /dev/null
+++ dragon_ml_toolbox-5.2.0/ml_tools/ML_models.py
@@ -0,0 +1,134 @@
+import torch
+from torch import nn
+from ._script_info import _script_info
+from typing import List
+
+
+__all__ = [
+    "MultilayerPerceptron",
+    "SequencePredictorLSTM"
+]
+
+
+class MultilayerPerceptron(nn.Module):
+    """
+    Creates a versatile Multilayer Perceptron (MLP) for regression or classification tasks.
+
+    This model generates raw output values (logits) suitable for use with loss
+    functions like `nn.CrossEntropyLoss` (for classification) or `nn.MSELoss`
+    (for regression).
+
+    Args:
+        in_features (int): The number of input features (e.g., columns in your data).
+        out_targets (int): The number of output targets. For regression, this is
+            typically 1. For classification, it's the number of classes.
+        hidden_layers (list[int]): A list where each integer represents the
+            number of neurons in a hidden layer. Defaults to [40, 80, 40].
+        drop_out (float): The dropout probability for neurons in each hidden
+            layer. Must be between 0.0 and 1.0. Defaults to 0.2.
+
+    ### Rules of thumb:
+    - Choose a number of hidden neurons between the size of the input layer and the size of the output layer.
+    - The number of hidden neurons should be 2/3 the size of the input layer, plus the size of the output layer.
+    - The number of hidden neurons should be less than twice the size of the input layer.
+    """
+    def __init__(self, in_features: int, out_targets: int,
+                 hidden_layers: List[int] = [40, 80, 40], drop_out: float = 0.2) -> None:
+        super().__init__()
+
+        # --- Validation ---
+        if not isinstance(in_features, int) or in_features < 1:
+            raise ValueError("in_features must be a positive integer.")
+        if not isinstance(out_targets, int) or out_targets < 1:
+            raise ValueError("out_targets must be a positive integer.")
+        if not isinstance(hidden_layers, list) or not all(isinstance(n, int) for n in hidden_layers):
+            raise TypeError("hidden_layers must be a list of integers.")
+        if not (0.0 <= drop_out < 1.0):
+            raise ValueError("drop_out must be a float between 0.0 and 1.0.")
+
+        # --- Build network layers ---
+        layers = []
+        current_features = in_features
+        for neurons in hidden_layers:
+            layers.extend([
+                nn.Linear(current_features, neurons),
+                nn.BatchNorm1d(neurons),
+                nn.ReLU(),
+                nn.Dropout(p=drop_out)
+            ])
+            current_features = neurons
+
+        # Add the final output layer
+        layers.append(nn.Linear(current_features, out_targets))
+
+        self._layers = nn.Sequential(*layers)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Defines the forward pass of the model."""
+        return self._layers(x)
+
+
+class SequencePredictorLSTM(nn.Module):
+    """
+    A simple LSTM-based network for sequence-to-sequence prediction tasks.
+
+    This model is designed for datasets where each input sequence maps to an
+    output sequence of the same length. It's suitable for forecasting problems
+    prepared by the `SequenceMaker` class.
+
+    The expected input shape is `(batch_size, sequence_length, features)`.
+
+    Args:
+        features (int): The number of features in the input sequence. Defaults to 1.
+        hidden_size (int): The number of features in the LSTM's hidden state.
+            Defaults to 100.
+        recurrent_layers (int): The number of recurrent LSTM layers. Defaults to 1.
+        dropout (float): The dropout probability for all but the last LSTM layer.
+            Defaults to 0.
+    """
+    def __init__(self, features: int = 1, hidden_size: int = 100,
+                 recurrent_layers: int = 1, dropout: float = 0):
+        super().__init__()
+
+        # --- Validation ---
+        if not isinstance(features, int) or features < 1:
+            raise ValueError("features must be a positive integer.")
+        if not isinstance(hidden_size, int) or hidden_size < 1:
+            raise ValueError("hidden_size must be a positive integer.")
+        if not isinstance(recurrent_layers, int) or recurrent_layers < 1:
+            raise ValueError("recurrent_layers must be a positive integer.")
+        if not (0.0 <= dropout < 1.0):
+            raise ValueError("dropout must be a float between 0.0 and 1.0.")
+
+        self.lstm = nn.LSTM(
+            input_size=features,
+            hidden_size=hidden_size,
+            num_layers=recurrent_layers,
+            dropout=dropout,
+            batch_first=True  # This is crucial for (batch, seq, feature) input
+        )
+        self.linear = nn.Linear(in_features=hidden_size, out_features=features)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """
+        Defines the forward pass.
+
+        Args:
+            x (torch.Tensor): The input tensor with shape
+                (batch_size, sequence_length, features).
+
+        Returns:
+            torch.Tensor: The output tensor with shape
+                (batch_size, sequence_length, features).
+        """
+        # The LSTM returns the full output sequence and the final hidden/cell states
+        lstm_out, _ = self.lstm(x)
+
+        # Pass the LSTM's output sequence to the linear layer
+        predictions = self.linear(lstm_out)
+
+        return predictions
+
+
+def info():
+    _script_info(__all__)
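
A short smoke test of the two new public models, assuming the package is installed with the PyTorch extra. Both models follow the shapes documented above; for the MLP's rule-of-thumb sizing, 12 inputs and 3 outputs suggest roughly 2/3 × 12 + 3 = 11 hidden neurons:

```python
import torch
from ml_tools.ML_models import MultilayerPerceptron, SequencePredictorLSTM

# MLP: 12 features -> 3 classes. Output is raw logits, so pair it with
# nn.CrossEntropyLoss for classification (or nn.MSELoss for regression).
mlp = MultilayerPerceptron(in_features=12, out_targets=3, hidden_layers=[11], drop_out=0.1)
x = torch.randn(8, 12)       # (batch, features)
print(mlp(x).shape)          # torch.Size([8, 3])

# LSTM: maps an input sequence to an output sequence of the same length.
lstm = SequencePredictorLSTM(features=1, hidden_size=32)
seq = torch.randn(8, 24, 1)  # (batch, seq_len, features)
print(lstm(seq).shape)       # torch.Size([8, 24, 1])
```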

--- dragon_ml_toolbox-5.0.0/pyproject.toml
+++ dragon_ml_toolbox-5.2.0/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "dragon-ml-toolbox"
-version = "5.0.0"
+version = "5.2.0"
 description = "A collection of tools for data science and machine learning projects."
 authors = [
     { name = "Karl Loza", email = "luigiloza@gmail.com" }

--- dragon_ml_toolbox-5.0.0/ml_tools/_pytorch_models.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import torch
-from torch import nn
-from ._script_info import _script_info
-
-
-__all__ = [
-    "MyNeuralNetwork",
-    "MyLSTMNetwork"
-]
-
-
-class MyNeuralNetwork(nn.Module):
-    def __init__(self, in_features: int, out_targets: int, hidden_layers: list[int]=[40,80,40], drop_out: float=0.2) -> None:
-        """
-        Creates a basic Neural Network.
-
-        * For Regression the last layer is Linear.
-        * For Classification the last layer is Logarithmic Softmax.
-
-        `out_targets` Is the number of expected output classes for classification; or `1` for regression.
-
-        `hidden_layers` takes a list of integers. Each position represents a hidden layer and its number of neurons.
-
-        * One rule of thumb is to choose a number of hidden neurons between the size of the input layer and the size of the output layer.
-        * Another rule suggests that the number of hidden neurons should be 2/3 the size of the input layer, plus the size of the output layer.
-        * Another rule suggests that the number of hidden neurons should be less than twice the size of the input layer.
-
-        `drop_out` represents the probability of neurons to be set to '0' during the training process of each layer. Range [0.0, 1.0).
-        """
-        super().__init__()
-
-        # Validate inputs and outputs
-        if isinstance(in_features, int) and isinstance(out_targets, int):
-            if in_features < 1 or out_targets < 1:
-                raise ValueError("Inputs or Outputs must be an integer value.")
-        else:
-            raise TypeError("Inputs or Outputs must be an integer value.")
-
-        # Validate layers
-        if isinstance(hidden_layers, list):
-            for number in hidden_layers:
-                if not isinstance(number, int):
-                    raise TypeError("Number of neurons per hidden layer must be an integer value.")
-        else:
-            raise TypeError("hidden_layers must be a list of integer values.")
-
-        # Validate dropout
-        if isinstance(drop_out, float):
-            if 1.0 > drop_out >= 0.0:
-                pass
-            else:
-                raise TypeError("drop_out must be a float value greater than or equal to 0 and less than 1.")
-        elif drop_out == 0:
-            pass
-        else:
-            raise TypeError("drop_out must be a float value greater than or equal to 0 and less than 1.")
-
-        # Create layers
-        layers = list()
-        for neurons in hidden_layers:
-            layers.append(nn.Linear(in_features=in_features, out_features=neurons))
-            layers.append(nn.BatchNorm1d(num_features=neurons))
-            layers.append(nn.ReLU())
-            layers.append(nn.Dropout(p=drop_out))
-            in_features = neurons
-        # Append output layer
-        layers.append(nn.Linear(in_features=in_features, out_features=out_targets))
-
-        # Check for classification or regression output
-        if out_targets > 1:
-            # layers.append(nn.Sigmoid())
-            layers.append(nn.LogSoftmax(dim=1))
-
-        # Create a container for layers
-        self._layers = nn.Sequential(*layers)
-
-    # Override forward()
-    def forward(self, X: torch.Tensor) -> torch.Tensor:
-        X = self._layers(X)
-        return X
-
-
-class _MyConvolutionalNetwork(nn.Module):
-    def __init__(self, outputs: int, color_channels: int=3, img_size: int=256, drop_out: float=0.2):
-        """
-        - EDUCATIONAL PURPOSES ONLY, not optimized and requires lots of memory.
-
-        Create a basic Convolutional Neural Network with two convolution layers with a pooling layer after each convolution.
-
-        Args:
-            `outputs`: Number of output classes (1 for regression).
-
-            `color_channels`: Color channels. Default is 3 (RGB).
-
-            `img_size`: Width and Height of image samples, must be square images. Default is 200.
-
-            `drop_out`: Neuron drop out probability. Default is 20%.
-        """
-        super().__init__()
-
-        # Validate outputs number
-        integer_error = " must be an integer greater than 0."
-        if isinstance(outputs, int):
-            if outputs < 1:
-                raise ValueError("Outputs" + integer_error)
-        else:
-            raise TypeError("Outputs" + integer_error)
-        # Validate color channels
-        if isinstance(color_channels, int):
-            if color_channels < 1:
-                raise ValueError("Color Channels" + integer_error)
-        else:
-            raise TypeError("Color Channels" + integer_error)
-        # Validate image size
-        if isinstance(img_size, int):
-            if img_size < 1:
-                raise ValueError("Image size" + integer_error)
-        else:
-            raise TypeError("Image size" + integer_error)
-        # Validate drop out
-        if isinstance(drop_out, float):
-            if 1.0 > drop_out >= 0.0:
-                pass
-            else:
-                raise TypeError("Drop out must be a float value greater than or equal to 0 and less than 1.")
-        elif drop_out == 0:
-            pass
-        else:
-            raise TypeError("Drop out must be a float value greater than or equal to 0 and less than 1.")
-
-        # 2 convolutions, 2 pooling layers
-        self._cnn_layers = nn.Sequential(
-            nn.Conv2d(in_channels=color_channels, out_channels=(color_channels * 2), kernel_size=5, stride=1, padding=1),
-            nn.MaxPool2d(kernel_size=4, stride=(4,4)),
-            nn.Conv2d(in_channels=(color_channels * 2), out_channels=(color_channels * 3), kernel_size=3, stride=1, padding=0),
-            nn.AvgPool2d(kernel_size=2, stride=(2,2))
-        )
-        # Calculate output features
-        flat_features = int(int((int((img_size + 2 - (5-1))//4) - (3-1))//2)**2) * (color_channels * 3)
-
-        # Make a standard ANN
-        ann = MyNeuralNetwork(in_features=flat_features, hidden_layers=[int(flat_features*0.5), int(flat_features*0.2), int(flat_features*0.005)],
-                              out_targets=outputs, drop_out=drop_out)
-        self._ann_layers = ann._layers
-
-        # Join CNN and ANN
-        self._structure = nn.Sequential(self._cnn_layers, nn.Flatten(), self._ann_layers)
-
-        # Send to CUDA if available
-        # if torch.cuda.is_available():
-        #     self.to('cuda')
-
-    # Override forward()
-    def forward(self, X: torch.Tensor) -> torch.Tensor:
-        X = self._structure(X)
-        return X
-
-
-class MyLSTMNetwork(nn.Module):
-    def __init__(self, features: int=1, hidden_size: int=100, recurrent_layers: int=1, dropout: float=0, reset_memory: bool=False, **kwargs):
-        """
-        Create a simple Recurrent Neural Network to predict 1 time step into the future of sequential data.
-
-        The sequence should be a 2D tensor with shape (sequence_length, number_of_features).
-
-        Args:
-            * `features`: Number of features representing the sequence. Defaults to 1.
-            * `hidden_size`: Hidden size of the LSTM model. Defaults to 100.
-            * `recurrent_layers`: Number of recurrent layers to use. Defaults to 1.
-            * `dropout`: Probability of dropping out neurons in each recurrent layer, except the last layer. Defaults to 0.
-            * `reset_memory`: Reset the initial hidden state and cell state for the recurrent layers at every epoch. Defaults to False.
-            * `kwargs`: Create custom attributes for the model.
-
-        Custom forward() parameters:
-            * `batch_size=1` (int): batch size for the LSTM net.
-            * `return_last_timestamp=False` (bool): Return only the value at `output[-1]`
-        """
-        # validate input size
-        if not isinstance(features, int):
-            raise TypeError("Input size must be an integer value.")
-        # validate hidden size
-        if not isinstance(hidden_size, int):
-            raise TypeError("Hidden size must be an integer value.")
-        # validate layers
-        if not isinstance(recurrent_layers, int):
-            raise TypeError("Number of recurrent layers must be an integer value.")
-        # validate dropout
-        if isinstance(dropout, (float, int)):
-            if 0 <= dropout < 1:
-                pass
-            else:
-                raise ValueError("Dropout must be a float in range [0.0, 1.0)")
-        else:
-            raise TypeError("Dropout must be a float in range [0.0, 1.0)")
-
-        super().__init__()
-
-        # Initialize memory
-        self._reset = reset_memory
-        self._memory = None
-
-        # hidden size and features shape
-        self._hidden = hidden_size
-        self._features = features
-
-        # RNN
-        self._lstm = nn.LSTM(input_size=features, hidden_size=self._hidden, num_layers=recurrent_layers, dropout=dropout)
-
-        # Fully connected layer
-        self._ann = nn.Linear(in_features=self._hidden, out_features=features)
-
-        # Parse extra parameters
-        for key, value in kwargs.items():
-            setattr(self, key, value)
-
-
-    def forward(self, seq: torch.Tensor, batch_size: int=1, return_last_timestamp: bool=False) -> torch.Tensor:
-        # reset memory
-        if self._reset:
-            self._memory = None
-        # reshape sequence to feed RNN
-        seq = seq.view(-1, batch_size, self._features)
-        # Pass sequence through RNN
-        seq, self._memory = self._lstm(seq, self._memory)
-        # Detach hidden state and cell state to prevent backpropagation error
-        self._memory = tuple(m.detach() for m in self._memory)
-        # Reshape outputs
-        seq = seq.view(-1, self._hidden)
-        # Pass sequence through fully connected layer
-        output = self._ann(seq)
-        # Return prediction of 1 time step in the future
-        if return_last_timestamp:
-            return output[-1].view(1,-1) #last item as a tensor.
-        else:
-            return output
-
-
-def info():
-    _script_info(__all__)
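
Since `ml_tools/_pytorch_models.py` is deleted in this release, any code importing it will break. A hedged migration sketch, based on the replacements added in `ml_tools/ML_models.py` above. Two behavioral differences are visible in the diff: `MultilayerPerceptron` no longer appends `LogSoftmax` for multi-class outputs (it returns raw logits), and `SequencePredictorLSTM` expects batch-first `(batch, seq_len, features)` input and keeps no hidden/cell-state memory between calls:

```python
# Before (5.0.0):
#   from ml_tools._pytorch_models import MyNeuralNetwork, MyLSTMNetwork
#   model = MyNeuralNetwork(in_features=10, out_targets=4)  # ended in LogSoftmax
#   criterion = nn.NLLLoss()                                # paired with log-probabilities
# After (5.2.0):
from torch import nn
from ml_tools.ML_models import MultilayerPerceptron

model = MultilayerPerceptron(in_features=10, out_targets=4)  # emits raw logits
criterion = nn.CrossEntropyLoss()                            # expects logits directly
```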