geoai-py 0.8.3__py2.py3-none-any.whl → 0.9.1__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- geoai/__init__.py +1 -1
- geoai/change_detection.py +1568 -0
- geoai/classify.py +58 -57
- geoai/detectron2.py +466 -0
- geoai/download.py +74 -68
- geoai/extract.py +186 -141
- geoai/geoai.py +13 -11
- geoai/hf.py +14 -12
- geoai/segment.py +44 -39
- geoai/segmentation.py +10 -9
- geoai/train.py +372 -241
- geoai/utils.py +198 -123
- {geoai_py-0.8.3.dist-info → geoai_py-0.9.1.dist-info}/METADATA +5 -1
- geoai_py-0.9.1.dist-info/RECORD +19 -0
- geoai_py-0.8.3.dist-info/RECORD +0 -17
- {geoai_py-0.8.3.dist-info → geoai_py-0.9.1.dist-info}/WHEEL +0 -0
- {geoai_py-0.8.3.dist-info → geoai_py-0.9.1.dist-info}/entry_points.txt +0 -0
- {geoai_py-0.8.3.dist-info → geoai_py-0.9.1.dist-info}/licenses/LICENSE +0 -0
- {geoai_py-0.8.3.dist-info → geoai_py-0.9.1.dist-info}/top_level.txt +0 -0
geoai/geoai.py
CHANGED
@@ -1,12 +1,14 @@
 """Main module."""
 
 import logging
+from typing import Any, Dict, List, Optional
 
 logging.getLogger("maplibre").setLevel(logging.ERROR)
 
 import leafmap
 import leafmap.maplibregl as maplibregl
 
+from .change_detection import ChangeDetection
 from .classify import classify_image, classify_images, train_classifier
 from .download import (
     download_naip,
@@ -26,17 +28,17 @@ from .extract import *
 from .hf import *
 from .segment import *
 from .train import (
+    get_instance_segmentation_model,
+    instance_segmentation,
+    instance_segmentation_batch,
+    instance_segmentation_inference_on_geotiff,
     object_detection,
     object_detection_batch,
-    train_MaskRCNN_model,
-    train_segmentation_model,
     semantic_segmentation,
     semantic_segmentation_batch,
     train_instance_segmentation_model,
-
-
-    get_instance_segmentation_model,
-    instance_segmentation_inference_on_geotiff,
+    train_MaskRCNN_model,
+    train_segmentation_model,
 )
 from .utils import *
 
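The reshuffled `from .train import` block is the user-visible API change here: only `instance_segmentation` and `instance_segmentation_batch` are genuinely new to this import block in 0.9.1; the other names were already exported in 0.8.3 and are simply re-sorted alphabetically. A minimal sketch of what this exposes; the per-name comments are assumptions based on the names, not documented behavior:

# Names taken from the import block above; comments are assumptions from the names.
from geoai.train import (
    instance_segmentation,        # newly exported in 0.9.1: single-raster inference (assumed)
    instance_segmentation_batch,  # newly exported in 0.9.1: multi-raster inference (assumed)
    get_instance_segmentation_model,             # already exported in 0.8.3, re-sorted
    instance_segmentation_inference_on_geotiff,  # already exported in 0.8.3, re-sorted
    train_MaskRCNN_model,                        # unchanged
    train_segmentation_model,                    # unchanged
)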
@@ -44,7 +46,7 @@ from .utils import *
 class Map(leafmap.Map):
     """A subclass of leafmap.Map for GeoAI applications."""
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         """Initialize the Map class."""
         super().__init__(*args, **kwargs)
 
@@ -52,7 +54,7 @@ class Map(leafmap.Map):
 class MapLibre(maplibregl.Map):
     """A subclass of maplibregl.Map for GeoAI applications."""
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         """Initialize the MapLibre class."""
         super().__init__(*args, **kwargs)
 
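Both map hunks are annotation-only: `*args`/`**kwargs` still pass straight through to the leafmap parents, so no caller changes are required. A quick sketch, assuming the usual leafmap constructor keywords:

import geoai

# center/zoom are standard leafmap.Map keywords, shown here as an example;
# geoai.Map forwards them unchanged to the parent class.
m = geoai.Map(center=[37.77, -122.42], zoom=10)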
@@ -73,7 +75,7 @@ def create_vector_data(
     height: int = 420,
     frame_border: int = 0,
     **kwargs: Any,
-):
+) -> Any:
     """Generates a widget-based interface for creating and managing vector data on a map.
 
     This function creates an interactive widget interface that allows users to draw features
@@ -157,9 +159,9 @@ def edit_vector_data(
     frame_border: int = 0,
     controls: Optional[List[str]] = None,
     position: str = "top-right",
-    fit_bounds_options: Dict = None,
+    fit_bounds_options: Optional[Dict] = None,
     **kwargs: Any,
-):
+) -> Any:
     """Generates a widget-based interface for creating and managing vector data on a map.
 
     This function creates an interactive widget interface that allows users to draw features
geoai/hf.py
CHANGED
@@ -2,7 +2,7 @@
 
 import csv
 import os
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import numpy as np
 import pandas as pd
@@ -12,7 +12,9 @@ from tqdm import tqdm
 from transformers import AutoConfig, AutoModelForMaskedImageModeling, pipeline
 
 
-def get_model_config(model_id):
+def get_model_config(
+    model_id: str,
+) -> "transformers.configuration_utils.PretrainedConfig":
     """
     Get the model configuration for a Hugging Face model.
 
@@ -25,7 +27,7 @@ def get_model_config(model_id):
     return AutoConfig.from_pretrained(model_id)
 
 
-def get_model_input_channels(model_id):
+def get_model_input_channels(model_id: str) -> int:
     """
     Check the number of input channels supported by a Hugging Face model.
 
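The two helpers above are now fully annotated, and the context line confirms `get_model_config` simply wraps `AutoConfig.from_pretrained`. A small usage sketch; the model id is an arbitrary example:

from geoai.hf import get_model_config, get_model_input_channels

config = get_model_config("facebook/sam-vit-base")            # a transformers PretrainedConfig
channels = get_model_input_channels("facebook/sam-vit-base")  # an int (e.g. 3 for RGB models)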
@@ -63,14 +65,14 @@ def get_model_input_channels(model_id):
 
 
 def image_segmentation(
-    tif_path,
-    output_path,
-    labels_to_extract=None,
-    dtype="uint8",
-    model_name=None,
-    segmenter_args=None,
-    **kwargs,
-):
+    tif_path: str,
+    output_path: str,
+    labels_to_extract: Optional[List[str]] = None,
+    dtype: str = "uint8",
+    model_name: Optional[str] = None,
+    segmenter_args: Optional[Dict] = None,
+    **kwargs: Any,
+) -> str:
     """
     Segments an image with a Hugging Face segmentation model and saves the results
     as a single georeferenced image where each class has a unique integer value.
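With the signature typed, a call reads as below. Parameter names, defaults, and the `str` return come from the hunk above; the paths and labels are placeholders:

from geoai.hf import image_segmentation

out_path = image_segmentation(
    tif_path="scene.tif",                    # placeholder input GeoTIFF
    output_path="scene_classes.tif",         # placeholder output path
    labels_to_extract=["building", "road"],  # placeholder labels; default is None
    dtype="uint8",                           # default
)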
@@ -204,7 +206,7 @@ def mask_generation(
     band_indices: Optional[List[int]] = None,
     min_object_size: int = 0,
     generator_kwargs: Optional[Dict] = None,
-    **kwargs,
+    **kwargs: Any,
 ) -> Tuple[str, str]:
     """
     Process a GeoTIFF using SAM mask generation and save results as a GeoTIFF and CSV.
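Only the tail of `mask_generation` is visible in this hunk, but the `Tuple[str, str]` return (a GeoTIFF and a CSV, per the docstring) is confirmed. In the sketch below the two leading positional path arguments are an assumption for illustration; only the keyword parameters shown above come from the diff:

from geoai.hf import mask_generation

masks_tif, masks_csv = mask_generation(
    "scene.tif",             # assumed input-path argument (not visible in this hunk)
    "scene_masks.tif",       # assumed output-path argument (not visible in this hunk)
    band_indices=[0, 1, 2],  # from the signature above
    min_object_size=100,     # drop tiny masks; default 0
)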
geoai/segment.py
CHANGED
@@ -2,23 +2,23 @@
 
 import os
 from dataclasses import dataclass
-from typing import Any,
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import cv2
+import geopandas as gpd
 import numpy as np
 import rasterio
 import torch
-import geopandas as gpd
 from PIL import Image
-from rasterio.windows import Window
 from rasterio.warp import transform_bounds
-from
+from rasterio.windows import Window
+from shapely.geometry import Polygon, box
 from tqdm import tqdm
 from transformers import (
-    CLIPSegForImageSegmentation,
-    CLIPSegProcessor,
     AutoModelForMaskGeneration,
     AutoProcessor,
+    CLIPSegForImageSegmentation,
+    CLIPSegProcessor,
     pipeline,
 )
 
@@ -90,13 +90,13 @@ class GroundedSAM:
 
     def __init__(
         self,
-        detector_id="IDEA-Research/grounding-dino-tiny",
-        segmenter_id="facebook/sam-vit-base",
-        device=None,
-        tile_size=1024,
-        overlap=128,
-        threshold=0.3,
-    ):
+        detector_id: str = "IDEA-Research/grounding-dino-tiny",
+        segmenter_id: str = "facebook/sam-vit-base",
+        device: Optional[str] = None,
+        tile_size: int = 1024,
+        overlap: int = 128,
+        threshold: float = 0.3,
+    ) -> None:
         """
         Initialize the GroundedSAM with the specified models and settings.
 
@@ -125,7 +125,7 @@ class GroundedSAM:
 
         print(f"GroundedSAM initialized on {self.device}")
 
-    def _load_models(self):
+    def _load_models(self) -> None:
         """Load the Grounding DINO and SAM models."""
         # Load Grounding DINO
         self.object_detector = pipeline(
@@ -408,17 +408,17 @@ class GroundedSAM:
 
     def segment_image(
         self,
-        input_path,
-        output_path,
-        text_prompts,
-        polygon_refinement=False,
-        export_boxes=False,
-        export_polygons=True,
-        smoothing_sigma=1.0,
-        nms_threshold=0.5,
-        min_polygon_area=50,
-        simplify_tolerance=2.0,
-    ):
+        input_path: str,
+        output_path: str,
+        text_prompts: Union[str, List[str]],
+        polygon_refinement: bool = False,
+        export_boxes: bool = False,
+        export_polygons: bool = True,
+        smoothing_sigma: float = 1.0,
+        nms_threshold: float = 0.5,
+        min_polygon_area: int = 50,
+        simplify_tolerance: float = 2.0,
+    ) -> str:
         """
         Segment a GeoTIFF image using text prompts with improved instance segmentation.
 
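Together with the constructor hunk above, the typed `GroundedSAM` API now reads end to end as below. Names, defaults, and the `str` return are from the diff; the paths are placeholders:

from geoai.segment import GroundedSAM

sam = GroundedSAM(
    detector_id="IDEA-Research/grounding-dino-tiny",  # default
    segmenter_id="facebook/sam-vit-base",             # default
    tile_size=1024,                                   # default tiling for large rasters
    threshold=0.3,                                    # default detection threshold
)
result = sam.segment_image(
    input_path="scene.tif",             # placeholder
    output_path="scene_buildings.tif",  # placeholder
    text_prompts=["building"],          # Union[str, List[str]]
    polygon_refinement=True,
)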
@@ -742,11 +742,11 @@ class CLIPSegmentation:
 
     def __init__(
         self,
-        model_name="CIDAS/clipseg-rd64-refined",
-        device=None,
-        tile_size=512,
-        overlap=32,
-    ):
+        model_name: str = "CIDAS/clipseg-rd64-refined",
+        device: Optional[str] = None,
+        tile_size: int = 512,
+        overlap: int = 32,
+    ) -> None:
         """
         Initialize the ImageSegmenter with the specified model and settings.
 
@@ -774,8 +774,13 @@ class CLIPSegmentation:
         print(f"Model loaded on {self.device}")
 
     def segment_image(
-        self,
-
+        self,
+        input_path: str,
+        output_path: str,
+        text_prompt: str,
+        threshold: float = 0.5,
+        smoothing_sigma: float = 1.0,
+    ) -> str:
         """
         Segment a GeoTIFF image using the provided text prompt.
 
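Same pattern for the CLIPSeg wrapper: the constructor and `segment_image` are now typed, with a `str` return. A sketch with placeholder paths:

from geoai.segment import CLIPSegmentation

seg = CLIPSegmentation(
    model_name="CIDAS/clipseg-rd64-refined",  # default
    tile_size=512,                            # default
)
out = seg.segment_image(
    input_path="scene.tif",         # placeholder
    output_path="scene_water.tif",  # placeholder
    text_prompt="water",
    threshold=0.5,                  # default
)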
@@ -974,13 +979,13 @@ class CLIPSegmentation:
 
     def segment_image_batch(
         self,
-        input_paths,
-        output_dir,
-        text_prompt,
-        threshold=0.5,
-        smoothing_sigma=1.0,
-        suffix="_segmented",
-    ):
+        input_paths: List[str],
+        output_dir: str,
+        text_prompt: str,
+        threshold: float = 0.5,
+        smoothing_sigma: float = 1.0,
+        suffix: str = "_segmented",
+    ) -> List[str]:
         """
         Segment multiple GeoTIFF images using the provided text prompt.
 
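The batch variant mirrors the single-image call and now advertises its `List[str]` return. Continuing the sketch above; the output naming is an assumption based on the `suffix` parameter:

outputs = seg.segment_image_batch(
    input_paths=["a.tif", "b.tif"],  # placeholder paths
    output_dir="segmented",
    text_prompt="water",
    suffix="_segmented",             # default; presumably appended to each output name
)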
geoai/segmentation.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 import albumentations as A
 import matplotlib.pyplot as plt
@@ -24,10 +25,10 @@ class CustomDataset(Dataset):
         self,
         images_dir: str,
         masks_dir: str,
-        transform: A.Compose = None,
-        target_size:
+        transform: Optional[A.Compose] = None,
+        target_size: Tuple[int, int] = (256, 256),
         num_classes: int = 2,
-    ):
+    ) -> None:
         """
         Args:
             images_dir (str): Directory containing images.
@@ -48,7 +49,7 @@ class CustomDataset(Dataset):
         """Returns the total number of samples."""
         return len(self.images)
 
-    def __getitem__(self, idx: int) ->
+    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
         """
         Args:
             idx (int): Index of the sample to fetch.
@@ -109,7 +110,7 @@ def prepare_datasets(
    transform: A.Compose,
    test_size: float = 0.2,
    random_state: int = 42,
-) ->
+) -> Tuple[Subset, Subset]:
    """
    Args:
        images_dir (str): Directory containing images.
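Reading the three hunks above together: `CustomDataset` yields `Dict[str, torch.Tensor]` samples, and `prepare_datasets` splits them into train/test `Subset`s. A sketch; the paths are placeholders and the `masks_dir` argument to `prepare_datasets` is an assumption inferred from `CustomDataset`:

import albumentations as A
from geoai.segmentation import CustomDataset, prepare_datasets

transform = A.Compose([A.HorizontalFlip(p=0.5)])  # any albumentations pipeline

dataset = CustomDataset(
    images_dir="data/images",  # placeholder
    masks_dir="data/masks",    # placeholder
    transform=transform,       # Optional[A.Compose], default None
    target_size=(256, 256),    # default
)
sample = dataset[0]            # Dict[str, torch.Tensor]

train_ds, test_ds = prepare_datasets(
    "data/images", "data/masks",  # masks_dir position assumed
    transform=transform,
    test_size=0.2,                # default
)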
@@ -233,7 +234,7 @@ def preprocess_image(image_path: str, target_size: tuple = (256, 256)) -> torch.
 def predict_image(
     model: SegformerForSemanticSegmentation,
     image_tensor: torch.Tensor,
-    original_size:
+    original_size: Tuple[int, int],
     device: torch.device,
 ) -> np.ndarray:
     """
@@ -262,7 +263,7 @@ def predict_image(
 def segment_image(
     image_path: str,
     model_path: str,
-    target_size:
+    target_size: Tuple[int, int] = (256, 256),
     device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu"),
 ) -> np.ndarray:
     """
@@ -288,8 +289,8 @@ def segment_image(
 def visualize_predictions(
     image_path: str,
     segmented_mask: np.ndarray,
-    target_size:
-    reference_image_path: str = None,
+    target_size: Tuple[int, int] = (256, 256),
+    reference_image_path: Optional[str] = None,
 ) -> None:
     """
     Visualizes the original image, segmented mask, and optionally the reference image.
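The inference path in this module is now typed throughout: `segment_image` loads a checkpoint from `model_path` and returns an `np.ndarray` mask, which `visualize_predictions` plots next to the source image. A closing sketch with placeholder paths:

from geoai.segmentation import segment_image, visualize_predictions

mask = segment_image(
    image_path="data/test.tif",         # placeholder
    model_path="checkpoints/best.pth",  # placeholder
    target_size=(256, 256),             # default
)
visualize_predictions(
    image_path="data/test.tif",
    segmented_mask=mask,
    target_size=(256, 256),
    reference_image_path=None,  # optional reference image; default None
)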