geoai-py 0.0.1__py2.py3-none-any.whl → 0.1.5__py2.py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
geoai/__init__.py CHANGED
@@ -1,5 +1,5 @@
  """Top-level package for geoai."""

  __author__ = """Qiusheng Wu"""
- __email__ = 'giswqs@gmail.com'
- __version__ = '0.0.1'
+ __email__ = "giswqs@gmail.com"
+ __version__ = "0.1.5"
geoai/common.py CHANGED
@@ -1,7 +1,6 @@
- """The common module contains common functions and classes used by the other modules.
- """
+ """The common module contains common functions and classes used by the other modules."""
+

  def hello_world():
-     """Prints "Hello World!" to the console.
-     """
-     print("Hello World!")
+     """Prints "Hello World!" to the console."""
+     print("Hello World!")
geoai/segmentation.py ADDED
@@ -0,0 +1,349 @@
+ import os
+ import numpy as np
+ from PIL import Image
+ import torch
+ import matplotlib.pyplot as plt
+ from torch.utils.data import Dataset, Subset
+ import torch.nn.functional as F
+ from sklearn.model_selection import train_test_split
+ import albumentations as A
+ from albumentations.pytorch import ToTensorV2
+ from transformers import (
+     Trainer,
+     TrainingArguments,
+     SegformerForSemanticSegmentation,
+     DefaultDataCollator,
+ )
+
+
+ class CustomDataset(Dataset):
+     """Custom Dataset for loading images and masks."""
+
+     def __init__(
+         self,
+         images_dir: str,
+         masks_dir: str,
+         transform: A.Compose = None,
+         target_size: tuple = (256, 256),
+         num_classes: int = 2,
+     ):
+         """
+         Args:
+             images_dir (str): Directory containing images.
+             masks_dir (str): Directory containing masks.
+             transform (A.Compose, optional): Transformations to be applied on the images and masks.
+             target_size (tuple, optional): Target size for resizing images and masks.
+             num_classes (int, optional): Number of classes in the masks.
+         """
+         self.images_dir = images_dir
+         self.masks_dir = masks_dir
+         self.transform = transform
+         self.target_size = target_size
+         self.num_classes = num_classes
+         self.images = sorted(os.listdir(images_dir))
+         self.masks = sorted(os.listdir(masks_dir))
+
+     def __len__(self) -> int:
+         """Returns the total number of samples."""
+         return len(self.images)
+
+     def __getitem__(self, idx: int) -> dict:
+         """
+         Args:
+             idx (int): Index of the sample to fetch.
+
+         Returns:
+             dict: A dictionary with 'pixel_values' and 'labels'.
+         """
+         img_path = os.path.join(self.images_dir, self.images[idx])
+         mask_path = os.path.join(self.masks_dir, self.masks[idx])
+         image = Image.open(img_path).convert("RGB")
+         mask = Image.open(mask_path).convert("L")
+
+         image = image.resize(self.target_size)
+         mask = mask.resize(self.target_size)
+
+         image = np.array(image)
+         mask = np.array(mask)
+
+         mask = (mask > 127).astype(np.uint8)
+
+         if self.transform:
+             transformed = self.transform(image=image, mask=mask)
+             image = transformed["image"]
+             mask = transformed["mask"]
+
+         assert (
+             mask.max() < self.num_classes
+         ), f"Mask values should be less than {self.num_classes}, but found {mask.max()}"
+         assert (
+             mask.min() >= 0
+         ), f"Mask values should be greater than or equal to 0, but found {mask.min()}"
+
+         # ToTensorV2 in the transform yields a torch.Tensor; without a
+         # transform the mask is still a NumPy array, so convert it first.
+         if not torch.is_tensor(mask):
+             mask = torch.from_numpy(mask)
+         mask = mask.clone().detach().long()
+
+         return {"pixel_values": image, "labels": mask}
+
+
+ def get_transform() -> A.Compose:
+     """
+     Returns:
+         A.Compose: A composition of image transformations.
+     """
+     return A.Compose(
+         [
+             A.Resize(256, 256),
+             A.HorizontalFlip(p=0.5),
+             A.VerticalFlip(p=0.5),
+             A.RandomRotate90(p=0.5),
+             A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+             ToTensorV2(),
+         ]
+     )
+
+
+ def prepare_datasets(
+     images_dir: str,
+     masks_dir: str,
+     transform: A.Compose,
+     test_size: float = 0.2,
+     random_state: int = 42,
+ ) -> tuple:
+     """
+     Args:
+         images_dir (str): Directory containing images.
+         masks_dir (str): Directory containing masks.
+         transform (A.Compose): Transformations to be applied.
+         test_size (float, optional): Proportion of the dataset to include in the validation split.
+         random_state (int, optional): Random seed for shuffling the dataset.
+
+     Returns:
+         tuple: Training and validation datasets.
+     """
+     dataset = CustomDataset(images_dir, masks_dir, transform)
+     train_indices, val_indices = train_test_split(
+         list(range(len(dataset))), test_size=test_size, random_state=random_state
+     )
+     train_dataset = Subset(dataset, train_indices)
+     val_dataset = Subset(dataset, val_indices)
+     return train_dataset, val_dataset
+
+
+ def train_model(
+     train_dataset: Dataset,
+     val_dataset: Dataset,
+     pretrained_model: str = "nvidia/segformer-b0-finetuned-ade-512-512",
+     model_save_path: str = "./model",
+     output_dir: str = "./results",
+     num_epochs: int = 10,
+     batch_size: int = 8,
+     learning_rate: float = 5e-5,
+ ) -> str:
+     """
+     Trains the model and saves the fine-tuned model to the specified path.
+
+     Args:
+         train_dataset (Dataset): Training dataset.
+         val_dataset (Dataset): Validation dataset.
+         pretrained_model (str, optional): Pretrained model to fine-tune.
+         model_save_path (str): Path to save the fine-tuned model. Defaults to './model'.
+         output_dir (str, optional): Directory to save training outputs.
+         num_epochs (int, optional): Number of training epochs.
+         batch_size (int, optional): Batch size for training and evaluation.
+         learning_rate (float, optional): Learning rate for training.
+
+     Returns:
+         str: Path to the saved fine-tuned model.
+     """
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     model = SegformerForSemanticSegmentation.from_pretrained(pretrained_model).to(
+         device
+     )
+     data_collator = DefaultDataCollator(return_tensors="pt")
+
+     training_args = TrainingArguments(
+         output_dir=output_dir,
+         num_train_epochs=num_epochs,
+         per_device_train_batch_size=batch_size,
+         per_device_eval_batch_size=batch_size,
+         eval_strategy="epoch",
+         save_strategy="epoch",
+         logging_dir="./logs",
+         learning_rate=learning_rate,
+     )
+
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         data_collator=data_collator,
+         train_dataset=train_dataset,
+         eval_dataset=val_dataset,
+     )
+
+     trainer.train()
+     model.save_pretrained(model_save_path)
+     print(f"Model saved to {model_save_path}")
+     return model_save_path
+
+
+ def load_model(
+     model_path: str, device: torch.device
+ ) -> SegformerForSemanticSegmentation:
+     """
+     Loads the fine-tuned model from the specified path.
+
+     Args:
+         model_path (str): Path to the model.
+         device (torch.device): Device to load the model on.
+
+     Returns:
+         SegformerForSemanticSegmentation: Loaded model.
+     """
+     model = SegformerForSemanticSegmentation.from_pretrained(model_path)
+     model.to(device)
+     model.eval()
+     return model
+
+
+ def preprocess_image(image_path: str, target_size: tuple = (256, 256)) -> torch.Tensor:
+     """
+     Preprocesses the input image for prediction.
+
+     Args:
+         image_path (str): Path to the input image.
+         target_size (tuple, optional): Target size for resizing the image.
+
+     Returns:
+         torch.Tensor: Preprocessed image tensor.
+     """
+     image = Image.open(image_path).convert("RGB")
+     transform = A.Compose(
+         [
+             A.Resize(target_size[0], target_size[1]),
+             A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+             ToTensorV2(),
+         ]
+     )
+     image = np.array(image)
+     transformed = transform(image=image)
+     return transformed["image"].unsqueeze(0)
+
+
+ def predict_image(
+     model: SegformerForSemanticSegmentation,
+     image_tensor: torch.Tensor,
+     original_size: tuple,
+     device: torch.device,
+ ) -> np.ndarray:
+     """
+     Predicts the segmentation mask for the input image.
+
+     Args:
+         model (SegformerForSemanticSegmentation): Fine-tuned model.
+         image_tensor (torch.Tensor): Preprocessed image tensor.
+         original_size (tuple): Original size of the image (width, height).
+         device (torch.device): Device to perform inference on.
+
+     Returns:
+         np.ndarray: Predicted segmentation mask.
+     """
+     with torch.no_grad():
+         image_tensor = image_tensor.to(device)
+         outputs = model(pixel_values=image_tensor)
+         logits = outputs.logits
+         upsampled_logits = F.interpolate(
+             logits, size=original_size[::-1], mode="bilinear", align_corners=False
+         )
+         predictions = torch.argmax(upsampled_logits, dim=1).cpu().numpy()
+     return predictions[0]
+
+
+ def segment_image(
+     image_path: str,
+     model_path: str,
+     target_size: tuple = (256, 256),
+     device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
+ ) -> np.ndarray:
+     """
+     Segments the input image using the fine-tuned model.
+
+     Args:
+         image_path (str): Path to the input image.
+         model_path (str): Path to the fine-tuned model.
+         target_size (tuple, optional): Target size for resizing the image.
+         device (torch.device, optional): Device to perform inference on.
+
+     Returns:
+         np.ndarray: Predicted segmentation mask.
+     """
+     model = load_model(model_path, device)
+     image = Image.open(image_path).convert("RGB")
+     original_size = image.size
+     image_tensor = preprocess_image(image_path, target_size)
+     predictions = predict_image(model, image_tensor, original_size, device)
+     return predictions
+
+
+ def visualize_predictions(
+     image_path: str,
+     segmented_mask: np.ndarray,
+     target_size: tuple = (256, 256),
+     reference_image_path: str = None,
+ ) -> None:
+     """
+     Visualizes the original image, segmented mask, and optionally the reference image.
+
+     Args:
+         image_path (str): Path to the original image.
+         segmented_mask (np.ndarray): Predicted segmentation mask.
+         target_size (tuple, optional): Target size for resizing images.
+         reference_image_path (str, optional): Path to the reference image.
+     """
+     original_image = Image.open(image_path).convert("RGB")
+     original_image = original_image.resize(target_size)
+     segmented_image = Image.fromarray((segmented_mask * 255).astype(np.uint8))
+
+     if reference_image_path:
+         reference_image = Image.open(reference_image_path).convert("RGB")
+         reference_image = reference_image.resize(target_size)
+         fig, axes = plt.subplots(1, 3, figsize=(18, 6))
+         axes[1].imshow(reference_image)
+         axes[1].set_title("Reference Image")
+         axes[1].axis("off")
+     else:
+         fig, axes = plt.subplots(1, 2, figsize=(12, 6))
+
+     axes[0].imshow(original_image)
+     axes[0].set_title("Original Image")
+     axes[0].axis("off")
+
+     if reference_image_path:
+         axes[2].imshow(segmented_image, cmap="gray")
+         axes[2].set_title("Segmented Image")
+         axes[2].axis("off")
+     else:
+         axes[1].imshow(segmented_image, cmap="gray")
+         axes[1].set_title("Segmented Image")
+         axes[1].axis("off")
+
+     plt.tight_layout()
+     plt.show()
+
+
+ # Example usage
+ if __name__ == "__main__":
+     images_dir = "../datasets/Water-Bodies-Dataset/Images"
+     masks_dir = "../datasets/Water-Bodies-Dataset/Masks"
+     transform = get_transform()
+     train_dataset, val_dataset = prepare_datasets(images_dir, masks_dir, transform)
+
+     model_save_path = "./fine_tuned_model"
+     # Pass the save path by keyword: the third positional parameter of
+     # train_model is pretrained_model, not model_save_path.
+     train_model(train_dataset, val_dataset, model_save_path=model_save_path)
+
+     image_path = "../datasets/Water-Bodies-Dataset/Images/water_body_44.jpg"
+     reference_image_path = image_path.replace("Images", "Masks")
+     segmented_mask = segment_image(image_path, model_save_path)
+
+     visualize_predictions(
+         image_path, segmented_mask, reference_image_path=reference_image_path
+     )
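The example block above trains before predicting. For inference alone, the module's public functions compose as in this minimal sketch; the model directory and image path are hypothetical placeholders, assuming a model was previously saved with `train_model`:

```python
# Inference with an already fine-tuned model, using geoai.segmentation's own API.
# "./fine_tuned_model" and "water_body_44.jpg" are hypothetical placeholders.
from geoai.segmentation import segment_image, visualize_predictions

mask = segment_image("water_body_44.jpg", "./fine_tuned_model")  # np.ndarray of class ids
visualize_predictions("water_body_44.jpg", mask)  # original and predicted mask side by side
```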
geoai_py-0.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,44 @@
+ Metadata-Version: 2.2
+ Name: geoai-py
+ Version: 0.1.5
+ Summary: A Python package for using Artificial Intelligence (AI) with geospatial data
+ Author-email: Qiusheng Wu <giswqs@gmail.com>
+ License: MIT License
+ Project-URL: Homepage, https://github.com/giswqs/geoai
+ Keywords: geoai
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Natural Language :: English
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: albumentations
+ Requires-Dist: scikit-learn
+ Requires-Dist: segment-geospatial
+ Requires-Dist: torch
+ Requires-Dist: transformers
+ Provides-Extra: all
+ Requires-Dist: geoai[extra]; extra == "all"
+ Provides-Extra: extra
+ Requires-Dist: pandas; extra == "extra"
+
+ # geoai
+
+ [![image](https://img.shields.io/pypi/v/geoai-py.svg)](https://pypi.python.org/pypi/geoai-py)
+ [![image](https://img.shields.io/conda/vn/conda-forge/geoai.svg)](https://anaconda.org/conda-forge/geoai)
+
+ **A Python package for using Artificial Intelligence (AI) with geospatial data**
+
+ - Free software: MIT license
+ - Documentation: <https://geoai.gishub.org>
+
+ ## Features
+
+ - Visualizing geospatial data, including vector, raster, and LiDAR data
+ - Segmenting remote sensing imagery with the Segment Anything Model
+ - Classifying remote sensing imagery with deep learning models
geoai_py-0.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
+ geoai/__init__.py,sha256=xkyHrnU3iebQB2V0bl1Xd9s8YKI5-UTS-tMbjgnJXNY,120
+ geoai/common.py,sha256=Rw6d9qmZDu3dUGTyJto1Y97S7-QA-m2p-pbCNvMDrm4,184
+ geoai/geoai.py,sha256=h0hwdogXGFqerm-5ZPeT-irPn91pCcQRjiHThXsRzEk,19
+ geoai/segmentation.py,sha256=Vcymnhwl_xikt4v9x8CYJq_vId9R1gB7-YzLfwg-F9M,11372
+ geoai_py-0.1.5.dist-info/LICENSE,sha256=vN2L5U7cZ6ZkOHFmc8WiGlsogWsZc5dllMeNxnKVOZg,1070
+ geoai_py-0.1.5.dist-info/METADATA,sha256=oQrREZxyg5_OgaVK4Pkn9txkaqUYcUtvigo13MfFNo0,1609
+ geoai_py-0.1.5.dist-info/WHEEL,sha256=9Hm2OB-j1QcCUq9Jguht7ayGIIZBRTdOXD1qg9cCgPM,109
+ geoai_py-0.1.5.dist-info/entry_points.txt,sha256=uGp3Az3HURIsRHP9v-ys0hIbUuBBNUfXv6VbYHIXeg4,41
+ geoai_py-0.1.5.dist-info/top_level.txt,sha256=1YkCUWu-ii-0qIex7kbwAvfei-gos9ycyDyUCJPNWHY,6
+ geoai_py-0.1.5.dist-info/RECORD,,
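Each RECORD row is `path,hash,size`; per the wheel spec, the hash is the unpadded URL-safe base64 encoding of the file's SHA-256 digest. A minimal sketch for reproducing a row's hash:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """RECORD-style hash: 'sha256=' + unpadded URL-safe base64 of the digest."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

# For the geoai/common.py shipped in this wheel, this should print
# sha256=Rw6d9qmZDu3dUGTyJto1Y97S7-QA-m2p-pbCNvMDrm4 (size 184 per RECORD).
print(record_hash("geoai/common.py"))
```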
geoai_py-0.0.1.dist-info/WHEEL → geoai_py-0.1.5.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.41.1)
+ Generator: setuptools (75.8.0)
  Root-Is-Purelib: true
  Tag: py2-none-any
  Tag: py3-none-any
geoai_py-0.1.5.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ geoai = geoai.cli:main
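This `[console_scripts]` table makes pip generate a `geoai` launcher that imports `geoai.cli` and calls its `main()`. The module itself is not part of this diff; the sketch below only illustrates the shape such an entry point conventionally expects, and its argument handling is hypothetical:

```python
# Hypothetical geoai/cli.py sketch; the real module's contents are not shown
# in this diff, only the entry point that expects a main() callable.
import argparse

def main() -> None:
    parser = argparse.ArgumentParser(prog="geoai", description="geoai command line")
    parser.add_argument("--version", action="store_true", help="print the version")
    args = parser.parse_args()
    if args.version:
        import geoai
        print(geoai.__version__)

if __name__ == "__main__":
    main()
```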
@@ -1,38 +0,0 @@
- Metadata-Version: 2.1
- Name: geoai-py
- Version: 0.0.1
- Summary: A Python package for using Artificial Intelligence (AI) with geospatial data
- Home-page: https://github.com/opengeos/geoai
- Author: Qiusheng Wu
- Author-email: giswqs@gmail.com
- License: MIT license
- Keywords: geoai
- Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Natural Language :: English
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
-
- # geoai
-
-
- [![image](https://img.shields.io/pypi/v/geoai.svg)](https://pypi.python.org/pypi/geoai)
- [![image](https://img.shields.io/conda/vn/conda-forge/geoai.svg)](https://anaconda.org/conda-forge/geoai)
-
-
- **A Python package for using Artificial Intelligence (AI) with geospatial data**
-
-
- - Free software: MIT license
- - Documentation: https://opengeos.github.io/geoai
-
-
- ## Features
-
- - TODO
@@ -1,8 +0,0 @@
- geoai/__init__.py,sha256=XvWapn8jRuXQLdHwiVTjXdEJErlnJSKLU4hK_2VN3Vc,120
- geoai/common.py,sha256=Si4ZvbkkaVGyKrOa7gmsVNWS2Abtji-V5nEsJhrIM3M,188
- geoai/geoai.py,sha256=h0hwdogXGFqerm-5ZPeT-irPn91pCcQRjiHThXsRzEk,19
- geoai_py-0.0.1.dist-info/LICENSE,sha256=vN2L5U7cZ6ZkOHFmc8WiGlsogWsZc5dllMeNxnKVOZg,1070
- geoai_py-0.0.1.dist-info/METADATA,sha256=9hKtttzpSD-lZc2suU7CX58dWwu6nrtKM6bhHg40AxY,1142
- geoai_py-0.0.1.dist-info/WHEEL,sha256=m9WAupmBd2JGDsXWQGJgMGXIWbQY3F5c2xBJbBhq0nY,110
- geoai_py-0.0.1.dist-info/top_level.txt,sha256=1YkCUWu-ii-0qIex7kbwAvfei-gos9ycyDyUCJPNWHY,6
- geoai_py-0.0.1.dist-info/RECORD,,