eoml-0.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. eoml/__init__.py +74 -0
  2. eoml/automation/__init__.py +7 -0
  3. eoml/automation/configuration.py +105 -0
  4. eoml/automation/dag.py +233 -0
  5. eoml/automation/experience.py +618 -0
  6. eoml/automation/tasks.py +825 -0
  7. eoml/bin/__init__.py +6 -0
  8. eoml/bin/clean_checkpoint.py +146 -0
  9. eoml/bin/land_cover_mapping_toml.py +435 -0
  10. eoml/bin/mosaic_images.py +137 -0
  11. eoml/data/__init__.py +7 -0
  12. eoml/data/basic_geo_data.py +214 -0
  13. eoml/data/dataset_utils.py +98 -0
  14. eoml/data/persistence/__init__.py +7 -0
  15. eoml/data/persistence/generic.py +253 -0
  16. eoml/data/persistence/lmdb.py +379 -0
  17. eoml/data/persistence/serializer.py +82 -0
  18. eoml/raster/__init__.py +7 -0
  19. eoml/raster/band.py +141 -0
  20. eoml/raster/dataset/__init__.py +6 -0
  21. eoml/raster/dataset/extractor.py +604 -0
  22. eoml/raster/raster_reader.py +602 -0
  23. eoml/raster/raster_utils.py +116 -0
  24. eoml/torch/__init__.py +7 -0
  25. eoml/torch/cnn/__init__.py +7 -0
  26. eoml/torch/cnn/augmentation.py +150 -0
  27. eoml/torch/cnn/dataset_evaluator.py +68 -0
  28. eoml/torch/cnn/db_dataset.py +605 -0
  29. eoml/torch/cnn/map_dataset.py +579 -0
  30. eoml/torch/cnn/map_dataset_const_mem.py +135 -0
  31. eoml/torch/cnn/outputs_transformer.py +130 -0
  32. eoml/torch/cnn/torch_utils.py +404 -0
  33. eoml/torch/cnn/training_dataset.py +241 -0
  34. eoml/torch/cnn/windows_dataset.py +120 -0
  35. eoml/torch/dataset/__init__.py +6 -0
  36. eoml/torch/dataset/shade_dataset_tester.py +46 -0
  37. eoml/torch/dataset/shade_tree_dataset_creators.py +537 -0
  38. eoml/torch/model_low_use.py +507 -0
  39. eoml/torch/models.py +282 -0
  40. eoml/torch/resnet.py +437 -0
  41. eoml/torch/sample_statistic.py +260 -0
  42. eoml/torch/trainer.py +782 -0
  43. eoml/torch/trainer_v2.py +253 -0
  44. eoml-0.9.0.dist-info/METADATA +93 -0
  45. eoml-0.9.0.dist-info/RECORD +47 -0
  46. eoml-0.9.0.dist-info/WHEEL +4 -0
  47. eoml-0.9.0.dist-info/entry_points.txt +3 -0
eoml/torch/models.py ADDED
@@ -0,0 +1,282 @@
+ """Model factory and custom neural network architectures for PyTorch.
+
+ This module provides a factory pattern for creating and managing neural network models,
+ along with custom CNN architectures for image classification tasks. It includes
+ initialization utilities and pre-configured model variants with batch normalization
+ and dropout layers.
+ """
+
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+
+ from eoml.torch.resnet import ResNet, resnet20, resnet32, resnet56
+ from eoml.torch.cnn.torch_utils import conv_out_sizes
+ from eoml.torch.model_low_use import (
+     Conv2Dense3,
+     Conv2DropDense3,
+     Conv2Norm,
+     Conv2NormV2,
+     Conv3Dense3,
+     Conv3Dense3Norm,
+     ConvJavaSmall,
+ )
+
+
+ def initialize_weights(m):
+     """Initialize weights for neural network layers using Kaiming initialization.
+
+     Applies appropriate initialization based on layer type:
+     - Conv2d: Kaiming uniform initialization for weights, zeros for bias
+     - BatchNorm2d: ones for weights, zeros for bias
+     - Linear: Kaiming uniform initialization for weights, zeros for bias
+
+     Args:
+         m: PyTorch module/layer to initialize.
+     """
+     if isinstance(m, nn.Conv2d):
+         nn.init.kaiming_uniform_(m.weight.data, nonlinearity='relu')
+         if m.bias is not None:
+             nn.init.constant_(m.bias.data, 0)
+     elif isinstance(m, nn.BatchNorm2d):
+         nn.init.constant_(m.weight.data, 1)
+         nn.init.constant_(m.bias.data, 0)
+     elif isinstance(m, nn.Linear):
+         nn.init.kaiming_uniform_(m.weight.data)
+         nn.init.constant_(m.bias.data, 0)
+
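A minimal usage sketch (editor's addition, not part of the wheel): initialize_weights is written to be passed to torch.nn.Module.apply, which calls it on every submodule; the model below is a hypothetical stand-in.

    import torch
    from torch import nn

    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3),  # gets Kaiming-uniform weights, zero bias
        nn.BatchNorm2d(16),               # gets weight=1, bias=0
        nn.Flatten(),
        nn.Linear(16 * 30 * 30, 10),      # gets Kaiming-uniform weights, zero bias
    )
    model.apply(initialize_weights)       # visits every submodule recursively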
+
+ class ModelFactory:
+     """Factory class for creating and managing neural network models.
+
+     Provides a registry of pre-configured model architectures and handles
+     model instantiation, including loading from saved weights or JIT-compiled models.
+
+     Attributes:
+         library (dict): Dictionary mapping model names to their constructor functions.
+     """
+
+     def __init__(self):
+         """Initialize the model factory with the default model registry."""
+         self.library = {
+             "Conv2Dense3": Conv2Dense3,
+             "Conv3Dense3": Conv3Dense3,
+             "Conv3Dense3Norm": Conv3Dense3Norm,
+             "Conv2DropDense3": Conv2DropDense3,
+             "ConvJavaSmall": ConvJavaSmall,
+             "ConvJavaSmallNorm": ConvSmallNorm,
+             "ConvJavaTinyNorm": ConvTinyNorm,
+             "Conv2Norm": Conv2Norm,
+             "Conv2NormV2": Conv2NormV2,
+             "Resnet20": resnet20,
+             "Resnet32": resnet32,
+             "Resnet56": resnet56,
+         }
+
+     def register(self, name, model):
+         """Register a new model in the factory.
+
+         Args:
+             name (str): Name identifier for the model.
+             model: Model constructor or factory function.
+         """
+         self.library[name] = model
+
+     def __call__(self, name, type="normal", path=None, model_args=None):
+         """Create a model instance from the factory.
+
+         Args:
+             name (str): Name of the model to create.
+             type (str, optional): Type of model - "normal" for a standard PyTorch model or
+                 "jitted" for a TorchScript JIT-compiled model. Defaults to "normal".
+             path (str, optional): Path to saved model weights or JIT model. If None,
+                 the model uses random initialization. Defaults to None.
+             model_args (dict, optional): Arguments to pass to the model constructor.
+                 Defaults to None.
+
+         Returns:
+             torch.nn.Module: Instantiated model.
+
+         Raises:
+             ValueError: If type is not "normal" or "jitted".
+         """
+         if type == "jitted":
+             return torch.jit.load(path)
+
+         if type == "normal":
+             constructor = self.library[name]
+             # Guard against model_args=None, which would break **-unpacking.
+             model = constructor(**(model_args or {}))
+         else:
+             raise ValueError("wrong model type %r: expected 'jitted' or 'normal'" % type)
+
+         if path is not None:
+             model.load_state_dict(torch.load(path))
+
+         return model
+
+
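A hedged usage sketch (editor's addition, not part of the wheel); the constructor arguments are assumed values matching the ConvSmallNorm/ConvTinyNorm signatures defined below, and the file names are hypothetical:

    factory = ModelFactory()

    # Fresh model with random initialization.
    model = factory("ConvJavaTinyNorm",
                    model_args={"in_size": 16, "n_bands": 4, "n_out": 5})

    # Same architecture with weights restored from a saved state dict.
    # model = factory("ConvJavaTinyNorm", path="checkpoint.pt",
    #                 model_args={"in_size": 16, "n_bands": 4, "n_out": 5})

    # A TorchScript model is loaded directly from disk; the name argument
    # is not consulted in that branch.
    # scripted = factory(None, type="jitted", path="model_scripted.pt")

    # Additional architectures can be registered at run time.
    # factory.register("MyNet", MyNet)  # MyNet: any nn.Module constructor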
+ class ConvSmallNorm(nn.Module):
+     """Convolutional neural network with batch normalization and dropout.
+
+     Architecture: Conv2d -> BatchNorm -> ReLU -> MaxPool -> 4x dense layers with dropout.
+     Designed for small input images with a configurable number of input bands.
+
+     Attributes:
+         in_size (int): Input image size (height and width).
+         n_bands (int): Number of input channels/bands.
+         conv (list): Convolutional kernel sizes.
+         pad (int): Padding for convolutional layers.
+         stride (list): Stride values for convolutions.
+         n_filter (list): Number of filters in convolutional layers.
+         input_sizes (list): Computed output sizes after each conv operation.
+         denses (list): Sizes of dense layers.
+         conv1 (nn.Conv2d): First convolutional layer.
+         conv1_bn (nn.BatchNorm2d): Batch normalization for first conv layer.
+         pool1 (nn.MaxPool2d): Max pooling layer.
+         fc1 (nn.Linear): First fully connected layer.
+         drop1 (nn.Dropout): Dropout after first FC layer.
+         fc2 (nn.Linear): Second fully connected layer.
+         drop2 (nn.Dropout): Dropout after second FC layer.
+         fc3 (nn.Linear): Third fully connected layer.
+         drop3 (nn.Dropout): Dropout after third FC layer.
+         fc4 (nn.Linear): Output fully connected layer.
+     """
+
+     def __init__(self, in_size, n_bands, n_out, p_drop=0.4):
+         """Initialize the ConvSmallNorm model.
+
+         Args:
+             in_size (int): Input image size (assumes square images).
+             n_bands (int): Number of input channels/bands.
+             n_out (int): Number of output classes.
+             p_drop (float, optional): Dropout probability. Defaults to 0.4.
+         """
+         super().__init__()
+
+         self.in_size = in_size
+         self.n_bands = n_bands
+
+         self.conv = [4, 2]
+         self.pad = 0
+         self.stride = [1, 2]
+
+         self.n_filter = [128]
+
+         self.input_sizes = conv_out_sizes(in_size, self.conv, self.stride, self.pad)
+
+         self.denses = [2048, 2048, 2048]
+
+         self.conv1 = nn.Conv2d(in_channels=n_bands, out_channels=self.n_filter[0],
+                                kernel_size=self.conv[0], padding=self.pad)
+         self.conv1_bn = nn.BatchNorm2d(self.n_filter[0])
+         self.pool1 = nn.MaxPool2d(2)
+         self.fc1 = nn.Linear(self.n_filter[-1] * self.input_sizes[-1] * self.input_sizes[-1], self.denses[0])
+         self.drop1 = nn.Dropout(p_drop)
+         self.fc2 = nn.Linear(self.denses[0], self.denses[1])
+         self.drop2 = nn.Dropout(p_drop)
+         self.fc3 = nn.Linear(self.denses[1], self.denses[2])
+         self.drop3 = nn.Dropout(p_drop)
+         self.fc4 = nn.Linear(self.denses[2], n_out)
+
+     def forward(self, x):
+         """Forward pass through the network.
+
+         Args:
+             x (torch.Tensor): Input tensor of shape (batch_size, n_bands, height, width).
+
+         Returns:
+             torch.Tensor: Output tensor of shape (batch_size, n_out).
+         """
+         x = F.relu(self.conv1_bn(self.conv1(x)))
+         x = self.pool1(x)
+         x = torch.flatten(x, 1)  # flatten all dimensions except batch
+         x = F.relu(self.fc1(x))
+         x = self.drop1(x)
+         x = F.relu(self.fc2(x))
+         x = self.drop2(x)
+         x = F.relu(self.fc3(x))
+         x = self.drop3(x)
+
+         # Return raw logits; apply softmax externally if probabilities are needed.
+         x = self.fc4(x)
+
+         return x
+
+
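A shape sketch (editor's addition, not part of the wheel), assuming conv_out_sizes returns the spatial size after each stage, so for in_size=16 the conv (kernel 4, stride 1, no padding) yields 13 and the 2x2 max pool yields 6:

    import torch

    net = ConvSmallNorm(in_size=16, n_bands=4, n_out=5)
    net.apply(initialize_weights)  # optional explicit re-initialization
    net.eval()                     # disable dropout for inference

    x = torch.randn(8, 4, 16, 16)  # (batch, bands, height, width)
    with torch.no_grad():
        logits = net(x)            # shape (8, 5); raw logits
    probs = torch.softmax(logits, dim=1)  # probabilities, if needed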
+ class ConvTinyNorm(nn.Module):
+     """Tiny convolutional neural network with batch normalization.
+
+     A smaller variant of ConvSmallNorm with fewer filters and dense units.
+     Architecture: Conv2d -> BatchNorm -> ReLU -> MaxPool -> 3x dense layers with dropout.
+
+     Attributes:
+         in_size (int): Input image size (height and width).
+         n_bands (int): Number of input channels/bands.
+         conv (list): Convolutional kernel sizes.
+         pad (int): Padding for convolutional layers.
+         stride (list): Stride values for convolutions.
+         n_filter (list): Number of filters in convolutional layers.
+         input_sizes (list): Computed output sizes after each conv operation.
+         denses (list): Sizes of dense layers.
+         conv1 (nn.Conv2d): First convolutional layer.
+         conv1_bn (nn.BatchNorm2d): Batch normalization for first conv layer.
+         pool1 (nn.MaxPool2d): Max pooling layer.
+         fc1 (nn.Linear): First fully connected layer.
+         drop1 (nn.Dropout): Dropout after first FC layer.
+         fc2 (nn.Linear): Second fully connected layer.
+         drop2 (nn.Dropout): Dropout after second FC layer.
+         fc3 (nn.Linear): Output fully connected layer.
+     """
+
+     def __init__(self, in_size, n_bands, n_out, p_drop=0.4):
+         """Initialize the ConvTinyNorm model.
+
+         Args:
+             in_size (int): Input image size (assumes square images).
+             n_bands (int): Number of input channels/bands.
+             n_out (int): Number of output classes.
+             p_drop (float, optional): Dropout probability. Defaults to 0.4.
+         """
+         super().__init__()
+
+         self.in_size = in_size
+         self.n_bands = n_bands
+
+         self.conv = [4, 2]
+         self.pad = 0
+         self.stride = [1, 2]
+
+         self.n_filter = [64, 64]  # only the first entry feeds the single conv layer
+
+         self.input_sizes = conv_out_sizes(in_size, self.conv, self.stride, self.pad)
+
+         self.denses = [1024, 1024]
+
+         self.conv1 = nn.Conv2d(in_channels=n_bands, out_channels=self.n_filter[0],
+                                kernel_size=self.conv[0], padding=self.pad)
+         self.conv1_bn = nn.BatchNorm2d(self.n_filter[0])
+         self.pool1 = nn.MaxPool2d(2)
+         self.fc1 = nn.Linear(self.n_filter[0] * self.input_sizes[-1] * self.input_sizes[-1], self.denses[0])
+         self.drop1 = nn.Dropout(p_drop)
+         self.fc2 = nn.Linear(self.denses[0], self.denses[1])
+         self.drop2 = nn.Dropout(p_drop)
+         self.fc3 = nn.Linear(self.denses[1], n_out)
+
+     def forward(self, x):
+         """Forward pass through the network.
+
+         Args:
+             x (torch.Tensor): Input tensor of shape (batch_size, n_bands, height, width).
+
+         Returns:
+             torch.Tensor: Output tensor of shape (batch_size, n_out).
+         """
+         x = F.relu(self.conv1_bn(self.conv1(x)))
+         x = self.pool1(x)
+         x = torch.flatten(x, 1)  # flatten all dimensions except batch
+         x = F.relu(self.fc1(x))
+         x = self.drop1(x)
+         x = F.relu(self.fc2(x))
+         x = self.drop2(x)
+
+         # Return raw logits; apply softmax externally if probabilities are needed.
+         x = self.fc3(x)
+
+         return x
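A save/reload sketch (editor's addition, not part of the wheel) exercising the factory's path and type arguments; the file names and constructor arguments are hypothetical, and TorchScript export is assumed to succeed for these modules:

    import torch

    args = {"in_size": 16, "n_bands": 4, "n_out": 5}
    factory = ModelFactory()
    model = factory("ConvJavaTinyNorm", model_args=args)

    # ... training would happen here ...

    # Plain state-dict round trip via load_state_dict in __call__.
    torch.save(model.state_dict(), "conv_tiny.pt")
    restored = factory("ConvJavaTinyNorm", path="conv_tiny.pt", model_args=args)

    # TorchScript round trip via the "jitted" branch, which calls torch.jit.load.
    scripted = torch.jit.script(model)
    scripted.save("conv_tiny_scripted.pt")
    restored_jit = factory(None, type="jitted", path="conv_tiny_scripted.pt")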