geoai-py 0.26.0__tar.gz → 0.27.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. {geoai_py-0.26.0 → geoai_py-0.27.0}/.gitignore +3 -0
  2. {geoai_py-0.26.0 → geoai_py-0.27.0}/PKG-INFO +4 -4
  3. {geoai_py-0.26.0 → geoai_py-0.27.0}/README.md +3 -3
  4. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/__init__.py +27 -1
  5. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/auto.py +4 -1
  6. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/change_detection.py +1 -1
  7. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/detectron2.py +4 -1
  8. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/extract.py +4 -1
  9. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/prithvi.py +91 -6
  10. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/sam.py +2 -1
  11. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/segment.py +10 -1
  12. geoai_py-0.27.0/geoai/timm_regress.py +1652 -0
  13. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/utils.py +3 -1
  14. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/PKG-INFO +4 -4
  15. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/SOURCES.txt +1 -0
  16. {geoai_py-0.26.0 → geoai_py-0.27.0}/mkdocs.yml +1 -0
  17. {geoai_py-0.26.0 → geoai_py-0.27.0}/pyproject.toml +2 -2
  18. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/README.md +3 -0
  19. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/moondream.py +241 -42
  20. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/samgeo.py +288 -80
  21. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/segmentation.py +23 -4
  22. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/metadata.txt +4 -1
  23. {geoai_py-0.26.0 → geoai_py-0.27.0}/.dockerignore +0 -0
  24. {geoai_py-0.26.0 → geoai_py-0.27.0}/.editorconfig +0 -0
  25. {geoai_py-0.26.0 → geoai_py-0.27.0}/.pre-commit-config.yaml +0 -0
  26. {geoai_py-0.26.0 → geoai_py-0.27.0}/CITATION.cff +0 -0
  27. {geoai_py-0.26.0 → geoai_py-0.27.0}/Dockerfile +0 -0
  28. {geoai_py-0.26.0 → geoai_py-0.27.0}/LICENSE +0 -0
  29. {geoai_py-0.26.0 → geoai_py-0.27.0}/MANIFEST.in +0 -0
  30. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/__init__.py +0 -0
  31. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/catalog_models.py +0 -0
  32. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/catalog_tools.py +0 -0
  33. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/geo_agents.py +0 -0
  34. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/map_tools.py +0 -0
  35. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/stac_models.py +0 -0
  36. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/agents/stac_tools.py +0 -0
  37. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/classify.py +0 -0
  38. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/dinov3.py +0 -0
  39. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/download.py +0 -0
  40. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/geoai.py +0 -0
  41. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/hf.py +0 -0
  42. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/landcover_train.py +0 -0
  43. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/landcover_utils.py +0 -0
  44. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/map_widgets.py +0 -0
  45. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/moondream.py +0 -0
  46. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/segmentation.py +0 -0
  47. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/timm_segment.py +0 -0
  48. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/timm_train.py +0 -0
  49. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/tools/__init__.py +0 -0
  50. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/tools/cloudmask.py +0 -0
  51. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/tools/multiclean.py +0 -0
  52. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/tools/sr.py +0 -0
  53. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai/train.py +0 -0
  54. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/dependency_links.txt +0 -0
  55. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/entry_points.txt +0 -0
  56. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/requires.txt +0 -0
  57. {geoai_py-0.26.0 → geoai_py-0.27.0}/geoai_py.egg-info/top_level.txt +0 -0
  58. {geoai_py-0.26.0 → geoai_py-0.27.0}/pytest.ini +0 -0
  59. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/LICENSE +0 -0
  60. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/__init__.py +0 -0
  61. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/_geoai_lib.py +0 -0
  62. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/__init__.py +0 -0
  63. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/map_tools.py +0 -0
  64. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/dialogs/update_checker.py +0 -0
  65. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/geoai_plugin.py +0 -0
  66. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/about.svg +0 -0
  67. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/gpu.svg +0 -0
  68. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/icon.png +0 -0
  69. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/moondream.svg +0 -0
  70. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/samgeo.png +0 -0
  71. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/geoai_plugin/icons/segment.svg +0 -0
  72. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/install.py +0 -0
  73. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/install.sh +0 -0
  74. {geoai_py-0.26.0 → geoai_py-0.27.0}/qgis_plugin/package_plugin.py +0 -0
  75. {geoai_py-0.26.0 → geoai_py-0.27.0}/requirements.txt +0 -0
  76. {geoai_py-0.26.0 → geoai_py-0.27.0}/requirements_docs.txt +0 -0
  77. {geoai_py-0.26.0 → geoai_py-0.27.0}/setup.cfg +0 -0
  78. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/__init__.py +0 -0
  79. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/create_test_data.py +0 -0
  80. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_classify.py +0 -0
  81. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_download.py +0 -0
  82. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_extract.py +0 -0
  83. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_fixtures.py +0 -0
  84. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_geoai.py +0 -0
  85. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_moondream.py +0 -0
  86. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_segment.py +0 -0
  87. {geoai_py-0.26.0 → geoai_py-0.27.0}/tests/test_utils.py +0 -0
@@ -32,6 +32,9 @@ docs/workshops/**/*.jpg
  docs/workshops/**/*.png
  docs/examples/data/
  docs/workshops/data/
+ docs/examples/ndvi_regression_output/
+ docs/examples/timm_regression_output/
+ docs/examples/ndvi_model/
  *.pth

  # Distribution / packaging
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: geoai-py
- Version: 0.26.0
+ Version: 0.27.0
  Summary: A Python package for using Artificial Intelligence (AI) with geospatial data
  Author-email: Qiusheng Wu <giswqs@gmail.com>
  License: MIT License
@@ -71,7 +71,7 @@ Dynamic: license-file
  [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/geoai.svg)](https://anaconda.org/conda-forge/geoai)
  [![Conda Recipe](https://img.shields.io/badge/recipe-geoai-green.svg)](https://github.com/conda-forge/geoai-py-feedstock)
  [![image](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
- [![image](https://img.shields.io/badge/YouTube-Tutorials-red)](https://tinyurl.com/GeoAI-Tutorials)
+ [![image](https://img.shields.io/badge/YouTube-Tutorials-red)](https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPcvENqwaPa_QwbbkZ5sctZE)
  [![QGIS](https://img.shields.io/badge/QGIS-plugin-orange.svg)](https://opengeoai.org/qgis_plugin)

  [![logo](https://raw.githubusercontent.com/opengeos/geoai/master/docs/assets/logo_rect.png)](https://github.com/opengeos/geoai/blob/master/docs/assets/logo.png)
@@ -91,7 +91,7 @@ The package provides five core capabilities:
  5. Interactive visualization through integration with [Leafmap](https://github.com/opengeos/leafmap/) and [MapLibre](https://github.com/eoda-dev/py-maplibregl).
  6. Seamless QGIS integration via a dedicated GeoAI plugin, enabling users to run AI-powered geospatial workflows directly within the QGIS desktop environment, without writing code.

- GeoAI addresses the growing demand for accessible AI tools in geospatial research by providing high-level APIs that abstract complex machine learning workflows while maintaining flexibility for advanced users. The package supports multiple data formats (GeoTIFF, JPEG2000,GeoJSON, Shapefile, GeoPackage) and includes automatic device management for GPU acceleration when available. With over 10 modules and extensive notebook examples, GeoAI serves as both a research tool and educational resource for the geospatial AI community.
+ GeoAI addresses the growing demand for accessible AI tools in geospatial research by providing high-level APIs that abstract complex machine learning workflows while maintaining flexibility for advanced users. The package supports multiple data formats (GeoTIFF, JPEG2000, GeoJSON, Shapefile, GeoPackage) and includes automatic device management for GPU acceleration when available. With over 10 modules and extensive notebook examples, GeoAI serves as both a research tool and educational resource for the geospatial AI community.

  ## 📝 Statement of Need

@@ -130,7 +130,7 @@ If you find GeoAI useful in your research, please consider citing the following

  - Integration with [PyTorch Segmentation Models](https://github.com/qubvel-org/segmentation_models.pytorch) for automatic feature extraction
  - Specialized segmentation algorithms optimized for satellite and aerial imagery
- - Streamlined workflows for segmenting buildings, water bodies, wetlands,solar panels, etc.
+ - Streamlined workflows for segmenting buildings, water bodies, wetlands, solar panels, etc.
  - Export capabilities to standard geospatial formats (GeoJSON, Shapefile, GeoPackage, GeoParquet)

  ### 🔍 Image Classification
@@ -6,7 +6,7 @@
  [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/geoai.svg)](https://anaconda.org/conda-forge/geoai)
  [![Conda Recipe](https://img.shields.io/badge/recipe-geoai-green.svg)](https://github.com/conda-forge/geoai-py-feedstock)
  [![image](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
- [![image](https://img.shields.io/badge/YouTube-Tutorials-red)](https://tinyurl.com/GeoAI-Tutorials)
+ [![image](https://img.shields.io/badge/YouTube-Tutorials-red)](https://www.youtube.com/playlist?list=PLAxJ4-o7ZoPcvENqwaPa_QwbbkZ5sctZE)
  [![QGIS](https://img.shields.io/badge/QGIS-plugin-orange.svg)](https://opengeoai.org/qgis_plugin)

  [![logo](https://raw.githubusercontent.com/opengeos/geoai/master/docs/assets/logo_rect.png)](https://github.com/opengeos/geoai/blob/master/docs/assets/logo.png)
@@ -26,7 +26,7 @@ The package provides five core capabilities:
  5. Interactive visualization through integration with [Leafmap](https://github.com/opengeos/leafmap/) and [MapLibre](https://github.com/eoda-dev/py-maplibregl).
  6. Seamless QGIS integration via a dedicated GeoAI plugin, enabling users to run AI-powered geospatial workflows directly within the QGIS desktop environment, without writing code.

- GeoAI addresses the growing demand for accessible AI tools in geospatial research by providing high-level APIs that abstract complex machine learning workflows while maintaining flexibility for advanced users. The package supports multiple data formats (GeoTIFF, JPEG2000,GeoJSON, Shapefile, GeoPackage) and includes automatic device management for GPU acceleration when available. With over 10 modules and extensive notebook examples, GeoAI serves as both a research tool and educational resource for the geospatial AI community.
+ GeoAI addresses the growing demand for accessible AI tools in geospatial research by providing high-level APIs that abstract complex machine learning workflows while maintaining flexibility for advanced users. The package supports multiple data formats (GeoTIFF, JPEG2000, GeoJSON, Shapefile, GeoPackage) and includes automatic device management for GPU acceleration when available. With over 10 modules and extensive notebook examples, GeoAI serves as both a research tool and educational resource for the geospatial AI community.

  ## 📝 Statement of Need

@@ -65,7 +65,7 @@ If you find GeoAI useful in your research, please consider citing the following

  - Integration with [PyTorch Segmentation Models](https://github.com/qubvel-org/segmentation_models.pytorch) for automatic feature extraction
  - Specialized segmentation algorithms optimized for satellite and aerial imagery
- - Streamlined workflows for segmenting buildings, water bodies, wetlands,solar panels, etc.
+ - Streamlined workflows for segmenting buildings, water bodies, wetlands, solar panels, etc.
  - Export capabilities to standard geospatial formats (GeoJSON, Shapefile, GeoPackage, GeoParquet)

  ### 🔍 Image Classification
@@ -2,7 +2,7 @@

  __author__ = """Qiusheng Wu"""
  __email__ = "giswqs@gmail.com"
- __version__ = "0.26.0"
+ __version__ = "0.27.0"


  import os
@@ -101,6 +101,12 @@ def set_proj_lib_path(verbose=False):

  from .dinov3 import DINOv3GeoProcessor, analyze_image_patches, create_similarity_map
  from .geoai import *
+ from .utils import (
+ orthogonalize,
+ regularization,
+ hybrid_regularization,
+ adaptive_regularization,
+ )

  from .timm_train import (
  get_timm_model,
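
The regularization helpers re-exported above operate on vector building footprints. A hedged sketch of how they might be called from the top level; the single-GeoDataFrame call pattern shown here is an assumption, and the exact signatures live in geoai/utils.py:

```python
import geopandas as gpd

import geoai

# Hedged usage sketch: the function names match the new top-level exports in
# 0.27.0, but the GeoDataFrame-in/GeoDataFrame-out call pattern and defaults
# are assumptions, not verified signatures from geoai/utils.py.
buildings = gpd.read_file("building_footprints.geojson")  # placeholder input

regular = geoai.regularization(buildings)            # assumed signature
ortho = geoai.orthogonalize(buildings)               # assumed signature
adaptive = geoai.adaptive_regularization(buildings)  # assumed signature
```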
@@ -122,6 +128,25 @@ from .timm_segment import (
  push_timm_model_to_hub,
  )

+ from .timm_regress import (
+ PixelRegressionModel,
+ PixelRegressionDataset,
+ create_regression_tiles,
+ train_pixel_regressor,
+ predict_raster,
+ evaluate_regression,
+ plot_regression_comparison,
+ plot_scatter,
+ plot_training_history,
+ visualize_prediction,
+ plot_regression_results,
+ # Backward compatibility aliases
+ TimmRegressor,
+ RegressionDataset,
+ train_timm_regressor,
+ create_regression_patches,
+ )
+
  # Import tools subpackage
  from . import tools

@@ -167,6 +192,7 @@ except ImportError:
  try:
  from .prithvi import (
  PrithviProcessor,
+ get_available_prithvi_models,
  load_prithvi_model,
  prithvi_inference,
  )
@@ -24,7 +24,6 @@ Example:
  import os
  from typing import Any, Dict, List, Optional, Tuple, Union

- import cv2
  import geopandas as gpd
  import numpy as np
  import rasterio
@@ -877,6 +876,8 @@ class AutoGeoModel:
  **kwargs: Any,
  ) -> Dict[str, Any]:
  """Run tiled inference for large images."""
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  if data.ndim == 3:
  _, height, width = data.shape
  else:
@@ -1872,6 +1873,7 @@ def show_segmentation(
  >>> result = geoai.auto.semantic_segmentation("aerial.tif", output_path="seg.tif")
  >>> fig = show_segmentation("aerial.tif", result["mask"])
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
  import matplotlib.pyplot as plt

  img, _ = _load_image_for_display(source)
@@ -1941,6 +1943,7 @@ def show_depth(
  >>> result = geoai.auto.depth_estimation("aerial.tif", output_path="depth.tif")
  >>> fig = show_depth("aerial.tif", result["depth"])
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
  import matplotlib.pyplot as plt

  img, _ = _load_image_for_display(source)
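
All of the cv2 changes in this release follow the same pattern: the module-level `import cv2` is removed and a function-level import is added where OpenCV is actually used, so merely importing geoai inside QGIS (which bundles its own OpenCV build) no longer loads a second cv2 at import time. A minimal sketch of the pattern; the helper below is illustrative, not part of the geoai API:

```python
import numpy as np


def mask_to_contours(mask: np.ndarray):
    """Illustrative helper showing the deferred-import pattern."""
    import cv2  # Lazy import to avoid QGIS opencv conflicts

    # OpenCV is only loaded when this function runs, not when the module
    # containing it is imported.
    contours, _ = cv2.findContours(
        mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
    )
    return contours
```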
@@ -3,7 +3,6 @@
  import os
  from typing import Any, Dict, List, Optional, Tuple, Union

- import cv2
  import matplotlib.pyplot as plt
  import numpy as np
  import rasterio
@@ -736,6 +735,7 @@ class ChangeDetection:
  output_path="split_comparison.png",
  ):
  """Create a split comparison visualization showing before/after with change overlay."""
+ import cv2  # Lazy import to avoid QGIS opencv conflicts

  # Load data
  with rasterio.open(image1_path) as src:
@@ -6,7 +6,6 @@ import os
  import warnings
  from typing import Dict, List, Optional, Tuple, Union

- import cv2
  import numpy as np
  import rasterio
  import torch
@@ -135,6 +134,8 @@ def detectron2_segment(
  Returns:
  Dict containing segmentation results and output file paths
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  check_detectron2()

  # Load the model
@@ -315,6 +316,8 @@ def visualize_detectron2_results(
  Returns:
  Visualization image as numpy array
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  check_detectron2()

  # Load the image
@@ -6,7 +6,6 @@ import time
  from typing import Any, Dict, Generator, List, Optional, Tuple, Union

  # Third-Party Libraries
- import cv2
  import geopandas as gpd
  import matplotlib.pyplot as plt
  import numpy as np
@@ -440,6 +439,7 @@ class ObjectDetector:
  Returns:
  List of polygons as lists of (x, y) coordinates
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts

  # Get parameters from kwargs or use instance defaults
  simplify_tolerance = kwargs.get("simplify_tolerance", self.simplify_tolerance)
@@ -637,6 +637,8 @@ class ObjectDetector:
  Returns:
  GeoDataFrame with objects
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  # Use class defaults if parameters not provided
  simplify_tolerance = (
  simplify_tolerance
@@ -2165,6 +2167,7 @@ class ObjectDetector:
  Returns:
  GeoDataFrame with car detections and confidence values
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts

  def _process_single_component(
  component_mask: np.ndarray,
@@ -28,6 +28,16 @@ NO_DATA_FLOAT = 0.0001
  OFFSET = 0
  PERCENTILE = 99.9

+ # Available Prithvi models
+ AVAILABLE_MODELS = [
+ "Prithvi-EO-2.0-tiny-TL", # tiny transfer learning, embed_dim=192, depth=12, with coords
+ "Prithvi-EO-2.0-100M-TL", # 100M transfer learning, embed_dim=768, depth=12, with coords
+ "Prithvi-EO-2.0-300M", # 300M base model, embed_dim=1024, depth=24, no coords
+ "Prithvi-EO-2.0-300M-TL", # 300M transfer learning, embed_dim=768, depth=12, with coords
+ "Prithvi-EO-2.0-600M", # 600M base model, embed_dim=1280, depth=32, no coords
+ "Prithvi-EO-2.0-600M-TL", # 600M transfer learning, embed_dim=1280, depth=32, with coords
+ ]
+

  def get_3d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):
  """Create 3D sin/cos positional embeddings.
@@ -622,8 +632,22 @@ class PrithviMAE(nn.Module):
  class PrithviProcessor:
  """Prithvi EO 2.0 processor with GeoTIFF input/output support.

- https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL
- https://github.com/NASA-IMPACT/Prithvi-EO-2.0
+ Supports multiple model variants:
+ - Prithvi-EO-2.0-tiny-TL (tiny transfer learning)
+ - Prithvi-EO-2.0-100M-TL (100M transfer learning)
+ - Prithvi-EO-2.0-300M (300M base model)
+ - Prithvi-EO-2.0-300M-TL (300M transfer learning)
+ - Prithvi-EO-2.0-600M (600M base model)
+ - Prithvi-EO-2.0-600M-TL (600M transfer learning)
+
+ References:
+ - tiny-TL: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-tiny-TL
+ - 100M-TL: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-100M-TL
+ - 300M: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-300M
+ - 300M-TL: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-300M-TL
+ - 600M: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-600M
+ - 600M-TL: https://huggingface.co/ibm-nasa-geospatial/Prithvi-EO-2.0-600M-TL
+ - GitHub: https://github.com/NASA-IMPACT/Prithvi-EO-2.0
  """

  def __init__(
@@ -637,7 +661,14 @@ class PrithviProcessor:
  """Initialize Prithvi processor.

  Args:
- model_name: Name of the Prithvi model to download from HuggingFace Hub
+ model_name: Name of the Prithvi model to download from HuggingFace Hub.
+ Options:
+ - "Prithvi-EO-2.0-tiny-TL" (tiny, 192 dim, 12 layers)
+ - "Prithvi-EO-2.0-100M-TL" (100M, 768 dim, 12 layers)
+ - "Prithvi-EO-2.0-300M" (base, 1024 dim, 24 layers)
+ - "Prithvi-EO-2.0-300M-TL" (default, 768 dim, 12 layers)
+ - "Prithvi-EO-2.0-600M" (base, 1280 dim, 32 layers)
+ - "Prithvi-EO-2.0-600M-TL" (1280 dim, 32 layers)
  config_path: Path to config file (optional, downloads if not provided)
  checkpoint_path: Path to checkpoint file (optional, downloads if not provided)
  device: Torch device to use
@@ -679,7 +710,13 @@
  """Download Prithvi model from HuggingFace Hub.

  Args:
- model_name: Name of the model
+ model_name: Name of the model. Options:
+ - "Prithvi-EO-2.0-tiny-TL"
+ - "Prithvi-EO-2.0-100M-TL"
+ - "Prithvi-EO-2.0-300M" (base model)
+ - "Prithvi-EO-2.0-300M-TL" (default)
+ - "Prithvi-EO-2.0-600M" (base model)
+ - "Prithvi-EO-2.0-600M-TL"
  cache_dir: Directory to cache files

  Returns:
@@ -1208,6 +1245,20 @@
  dest.write(image[i], i + 1)


+ def get_available_prithvi_models() -> List[str]:
+ """Get list of available Prithvi model names.
+
+ Returns:
+ List of available model names
+
+ Example:
+ >>> models = get_available_prithvi_models()
+ >>> print(models)
+ ['Prithvi-EO-2.0-300M-TL', 'Prithvi-EO-2.0-600M-TL']
+ """
+ return AVAILABLE_MODELS.copy()
+
+
  def load_prithvi_model(
  model_name: str = "Prithvi-EO-2.0-300M-TL",
  device: Optional[str] = None,
@@ -1216,12 +1267,32 @@
  """Load Prithvi model (convenience function).

  Args:
- model_name: Name of the model
+ model_name: Name of the model. Options:
+ - "Prithvi-EO-2.0-tiny-TL"
+ - "Prithvi-EO-2.0-100M-TL"
+ - "Prithvi-EO-2.0-300M" (base)
+ - "Prithvi-EO-2.0-300M-TL" (default)
+ - "Prithvi-EO-2.0-600M" (base)
+ - "Prithvi-EO-2.0-600M-TL"
  device: Device to use ('cuda' or 'cpu')
  cache_dir: Cache directory

  Returns:
  PrithviProcessor instance
+
+ Example:
+ >>> # Load tiny-TL model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-tiny-TL")
+ >>> # Load 100M-TL model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-100M-TL")
+ >>> # Load 300M base model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-300M")
+ >>> # Load 300M-TL model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-300M-TL")
+ >>> # Load 600M base model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-600M")
+ >>> # Load 600M-TL model
+ >>> processor = load_prithvi_model("Prithvi-EO-2.0-600M-TL")
  """
  if device is not None:
  device = torch.device(device)
@@ -1245,9 +1316,23 @@
  Args:
  file_paths: List of input GeoTIFF files
  output_dir: Output directory
- model_name: Name of the model
+ model_name: Name of the model. Options:
+ - "Prithvi-EO-2.0-tiny-TL"
+ - "Prithvi-EO-2.0-100M-TL"
+ - "Prithvi-EO-2.0-300M" (base)
+ - "Prithvi-EO-2.0-300M-TL" (default)
+ - "Prithvi-EO-2.0-600M" (base)
+ - "Prithvi-EO-2.0-600M-TL"
  mask_ratio: Optional mask ratio
  device: Device to use
+
+ Example:
+ >>> # Use tiny-TL model
+ >>> prithvi_inference(
+ ... file_paths=["img1.tif", "img2.tif", "img3.tif", "img4.tif"],
+ ... model_name="Prithvi-EO-2.0-tiny-TL",
+ ... output_dir="output_tiny"
+ ... )
  """
  processor = load_prithvi_model(model_name, device)
  processor.process_files(file_paths, output_dir, mask_ratio)
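
Putting the Prithvi additions together: list the available variants, then run the documented inference flow. The model names and calls mirror the docstrings above; the GeoTIFF paths are placeholders, and the prithvi imports are optional (guarded by try/except ImportError in geoai/__init__.py), so the Prithvi dependencies must be installed:

```python
import geoai

# New in 0.27.0: enumerate the supported Prithvi-EO-2.0 variants.
print(geoai.get_available_prithvi_models())

# Load a specific variant (convenience function from the docstring examples).
processor = geoai.load_prithvi_model("Prithvi-EO-2.0-tiny-TL")

# Or run the end-to-end helper on a time series of GeoTIFFs (placeholder paths).
geoai.prithvi_inference(
    file_paths=["img1.tif", "img2.tif", "img3.tif", "img4.tif"],
    model_name="Prithvi-EO-2.0-tiny-TL",
    output_dir="output_tiny",
)
```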
@@ -5,7 +5,6 @@ The SamGeo class provides an interface for segmenting geospatial data using the
  import os
  from typing import Any, Dict, List, Optional, Tuple, Union

- import cv2
  import numpy as np
  import torch
  from leafmap import array_to_image, blend_images
@@ -125,6 +124,7 @@ class SamGeo:
  Raises:
  ValueError: If the input source is not a valid path or numpy array.
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts

  if isinstance(source, str):
  if source.startswith("http"):
@@ -399,6 +399,7 @@ class SamGeo:
  Raises:
  ValueError: If no masks are available and `save_masks()` cannot generate them.
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
  import matplotlib.pyplot as plt

  if self.batch:
@@ -4,7 +4,6 @@ import os
  from dataclasses import dataclass
  from typing import Any, Dict, List, Optional, Tuple, Union

- import cv2
  import geopandas as gpd
  import numpy as np
  import rasterio
@@ -174,6 +173,8 @@ class GroundedSAM:
  Returns:
  List[DetectionResult]: Filtered detection results.
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  if not detections:
  return detections

@@ -235,6 +236,8 @@ class GroundedSAM:

  def _mask_to_polygon(self, mask: np.ndarray) -> List[List[int]]:
  """Convert mask to polygon coordinates."""
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  # Find contours in the binary mask
  contours, _ = cv2.findContours(
  mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
@@ -255,6 +258,8 @@ class GroundedSAM:
  self, polygon: List[Tuple[int, int]], image_shape: Tuple[int, int]
  ) -> np.ndarray:
  """Convert polygon to mask."""
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  # Create an empty mask
  mask = np.zeros(image_shape, dtype=np.uint8)

@@ -279,6 +284,8 @@ class GroundedSAM:
  Returns:
  List[np.ndarray]: List of individual instance masks.
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  # Find connected components
  num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(
  mask.astype(np.uint8), connectivity=8
@@ -320,6 +327,8 @@ class GroundedSAM:
  Returns:
  List[Dict]: List of polygon dictionaries with geometry and properties.
  """
+ import cv2  # Lazy import to avoid QGIS opencv conflicts
+
  polygons = []

  # Get individual instances
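
For context on what these GroundedSAM helpers do, here is an independent sketch of the same OpenCV techniques (connected-component splitting and polygon rasterization); the function names and the area threshold are illustrative, not the geoai implementation:

```python
import numpy as np


def split_instances(mask: np.ndarray, min_area: int = 10):
    """Split a binary mask into per-instance masks (illustrative sketch)."""
    import cv2  # Lazy import to avoid QGIS opencv conflicts

    num_labels, labels, stats, _ = cv2.connectedComponentsWithStats(
        mask.astype(np.uint8), connectivity=8
    )
    # Label 0 is the background; keep components above a minimum area.
    return [
        (labels == label).astype(np.uint8)
        for label in range(1, num_labels)
        if stats[label, cv2.CC_STAT_AREA] >= min_area
    ]


def polygon_to_mask(polygon, image_shape):
    """Rasterize a polygon given as [(x, y), ...] into a binary mask (sketch)."""
    import cv2  # Lazy import to avoid QGIS opencv conflicts

    mask = np.zeros(image_shape, dtype=np.uint8)
    points = np.asarray(polygon, dtype=np.int32).reshape(-1, 1, 2)
    cv2.fillPoly(mask, [points], 1)
    return mask
```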