geoai-py 0.8.1.tar.gz → 0.8.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. {geoai_py-0.8.1 → geoai_py-0.8.2}/PKG-INFO +1 -1
  2. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/building_detection_lidar.ipynb +12 -2
  3. geoai_py-0.8.2/docs/examples/water_detection_s2.ipynb +297 -0
  4. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/__init__.py +1 -1
  5. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/utils.py +58 -40
  6. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/PKG-INFO +1 -1
  7. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/SOURCES.txt +1 -0
  8. {geoai_py-0.8.1 → geoai_py-0.8.2}/mkdocs.yml +1 -0
  9. {geoai_py-0.8.1 → geoai_py-0.8.2}/pyproject.toml +2 -2
  10. {geoai_py-0.8.1 → geoai_py-0.8.2}/.editorconfig +0 -0
  11. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/FUNDING.yml +0 -0
  12. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/ISSUE_TEMPLATE/bug_report.md +0 -0
  13. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/ISSUE_TEMPLATE/config.yml +0 -0
  14. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/ISSUE_TEMPLATE/feature_request.md +0 -0
  15. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/dependabot.yml +0 -0
  16. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/docker-image.yml +0 -0
  17. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/docker-publish.yml +0 -0
  18. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/docs-build.yml +0 -0
  19. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/docs.yml +0 -0
  20. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/macos.yml +0 -0
  21. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/pypi.yml +0 -0
  22. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/ubuntu.yml +0 -0
  23. {geoai_py-0.8.1 → geoai_py-0.8.2}/.github/workflows/windows.yml +0 -0
  24. {geoai_py-0.8.1 → geoai_py-0.8.2}/.gitignore +0 -0
  25. {geoai_py-0.8.1 → geoai_py-0.8.2}/.pre-commit-config.yaml +0 -0
  26. {geoai_py-0.8.1 → geoai_py-0.8.2}/Dockerfile +0 -0
  27. {geoai_py-0.8.1 → geoai_py-0.8.2}/LICENSE +0 -0
  28. {geoai_py-0.8.1 → geoai_py-0.8.2}/MANIFEST.in +0 -0
  29. {geoai_py-0.8.1 → geoai_py-0.8.2}/README.md +0 -0
  30. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/CNAME +0 -0
  31. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/assets/logo.ico +0 -0
  32. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/assets/logo.png +0 -0
  33. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/assets/logo_rect.png +0 -0
  34. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/changelog.md +0 -0
  35. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/classify.md +0 -0
  36. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/contributing.md +0 -0
  37. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/download.md +0 -0
  38. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/_template.ipynb +0 -0
  39. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/batch_segmentation.ipynb +0 -0
  40. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/building_footprints_africa.ipynb +0 -0
  41. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/building_footprints_china.ipynb +0 -0
  42. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/building_footprints_usa.ipynb +0 -0
  43. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/building_regularization.ipynb +0 -0
  44. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/car_detection.ipynb +0 -0
  45. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/create_vector.ipynb +0 -0
  46. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/data_visualization.ipynb +0 -0
  47. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/dataviz/lidar_viz.ipynb +0 -0
  48. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/dataviz/raster_viz.ipynb +0 -0
  49. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/dataviz/vector_viz.ipynb +0 -0
  50. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/download_data.ipynb +0 -0
  51. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/download_naip.ipynb +0 -0
  52. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/download_sentinel2.ipynb +0 -0
  53. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/edit_vector.ipynb +0 -0
  54. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/geometric_properties.ipynb +0 -0
  55. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/globe_projection.ipynb +0 -0
  56. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/grounded_sam.ipynb +0 -0
  57. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/image_chips.ipynb +0 -0
  58. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/image_tiling.ipynb +0 -0
  59. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/jupytext.toml +0 -0
  60. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/load_model_checkpoint.ipynb +0 -0
  61. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/parking_spot_detection.ipynb +0 -0
  62. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/planetary_computer.ipynb +0 -0
  63. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/rastervision/semantic_segmentation.ipynb +0 -0
  64. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/regularization.ipynb +0 -0
  65. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/arcgis.ipynb +0 -0
  66. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/automatic_mask_generator.ipynb +0 -0
  67. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/automatic_mask_generator_hq.ipynb +0 -0
  68. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/box_prompts.ipynb +0 -0
  69. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/fast_sam.ipynb +0 -0
  70. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/input_prompts.ipynb +0 -0
  71. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/input_prompts_hq.ipynb +0 -0
  72. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/maxar_open_data.ipynb +0 -0
  73. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/satellite-predictor.ipynb +0 -0
  74. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/satellite.ipynb +0 -0
  75. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/swimming_pools.ipynb +0 -0
  76. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/text_prompts.ipynb +0 -0
  77. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo/text_prompts_batch.ipynb +0 -0
  78. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/samgeo.ipynb +0 -0
  79. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/ship_detection.ipynb +0 -0
  80. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/solar_panel_detection.ipynb +0 -0
  81. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/text_prompt_segmentation.ipynb +0 -0
  82. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_building_footprints_usa.ipynb +0 -0
  83. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_car_detection.ipynb +0 -0
  84. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_landcover_classification.ipynb +0 -0
  85. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_object_detection_model.ipynb +0 -0
  86. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_segmentation_model.ipynb +0 -0
  87. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_ship_detection.ipynb +0 -0
  88. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_solar_panel_detection.ipynb +0 -0
  89. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/train_water_detection.ipynb +0 -0
  90. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/view_metadata.ipynb +0 -0
  91. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/water_detection.ipynb +0 -0
  92. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/water_dynamics.ipynb +0 -0
  93. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/waterbody-dataset-sample/README.md +0 -0
  94. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/examples/wetland_mapping.ipynb +0 -0
  95. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/extract.md +0 -0
  96. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/faq.md +0 -0
  97. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/geoai.md +0 -0
  98. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/hf.md +0 -0
  99. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/index.md +0 -0
  100. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/installation.md +0 -0
  101. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/overrides/main.html +0 -0
  102. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/sam.md +0 -0
  103. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/segment.md +0 -0
  104. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/segmentation.md +0 -0
  105. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/train.md +0 -0
  106. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/usage.md +0 -0
  107. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/utils.md +0 -0
  108. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/workshops/GeoAI_Workshop_2025.ipynb +0 -0
  109. {geoai_py-0.8.1 → geoai_py-0.8.2}/docs/workshops/jupytext.toml +0 -0
  110. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/classify.py +0 -0
  111. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/download.py +0 -0
  112. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/extract.py +0 -0
  113. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/geoai.py +0 -0
  114. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/hf.py +0 -0
  115. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/sam.py +0 -0
  116. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/segment.py +0 -0
  117. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/segmentation.py +0 -0
  118. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai/train.py +0 -0
  119. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/dependency_links.txt +0 -0
  120. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/entry_points.txt +0 -0
  121. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/requires.txt +0 -0
  122. {geoai_py-0.8.1 → geoai_py-0.8.2}/geoai_py.egg-info/top_level.txt +0 -0
  123. {geoai_py-0.8.1 → geoai_py-0.8.2}/requirements.txt +0 -0
  124. {geoai_py-0.8.1 → geoai_py-0.8.2}/requirements_docs.txt +0 -0
  125. {geoai_py-0.8.1 → geoai_py-0.8.2}/setup.cfg +0 -0
  126. {geoai_py-0.8.1 → geoai_py-0.8.2}/tests/__init__.py +0 -0
  127. {geoai_py-0.8.1 → geoai_py-0.8.2}/tests/test_geoai.py +0 -0
--- geoai_py-0.8.1/PKG-INFO
+++ geoai_py-0.8.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: geoai-py
-Version: 0.8.1
+Version: 0.8.2
 Summary: A Python package for using Artificial Intelligence (AI) with geospatial data
 Author-email: Qiusheng Wu <giswqs@gmail.com>
 License: MIT License
--- geoai_py-0.8.1/docs/examples/building_detection_lidar.ipynb
+++ geoai_py-0.8.2/docs/examples/building_detection_lidar.ipynb
@@ -10,9 +10,9 @@
 "source": [
 "# Building Detection from Aerial Imagery and LiDAR Data\n",
 "\n",
-"[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/opengeos/geoai/blob/main/docs/examples/building_detection_LiDAR.ipynb)\n",
+"[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/opengeos/geoai/blob/main/docs/examples/building_detection_lidar.ipynb)\n",
 "\n",
-"This notebook demonstrates how to train semantic segmentation models for building detection from [NAIP aerial imagery](https://planetarycomputer.microsoft.com/dataset/naip) and [height above ground (HAG)](https://planetarycomputer.microsoft.com/dataset/3dep-LiDAR-hag) data derived from LiDAR data with just a few lines of code. You can adapt this notebook to segment other objects of interest (such as trees, cars, etc.) from aerial imagery and LiDAR data.\n",
+"This notebook demonstrates how to train semantic segmentation models for building detection from [NAIP aerial imagery](https://planetarycomputer.microsoft.com/dataset/naip) and [height above ground (HAG)](https://planetarycomputer.microsoft.com/dataset/3dep-lidar-hag) data derived from LiDAR data with just a few lines of code. You can adapt this notebook to segment other objects of interest (such as trees, cars, etc.) from aerial imagery and LiDAR data.\n",
 "\n",
 "## Install packages\n",
 "\n",
@@ -45,6 +45,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import os\n",
 "import geoai"
 ]
 },
@@ -100,6 +101,15 @@
 "Visualize the building footprints with the aerial imagery."
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"os.environ[\"TITILER_ENDPOINT\"] = \"https://titiler.xyz\""
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
--- /dev/null
+++ geoai_py-0.8.2/docs/examples/water_detection_s2.ipynb
@@ -0,0 +1,297 @@
+{
+"cells": [
+{
+"cell_type": "markdown",
+"metadata": {
+"vscode": {
+"languageId": "raw"
+}
+},
+"source": [
+"# Water Detection with Sentinel-2 Imagery\n",
+"\n",
+"[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/opengeos/geoai/blob/main/docs/examples/water_detection_s2.ipynb)\n",
+"\n",
+"This notebook demonstrates how to train semantic segmentation models for water detection using Sentinel-2 imagery.\n",
+"\n",
+"## Install packages\n",
+"\n",
+"To use the new functionality, ensure the required packages are installed."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# %pip install geoai-py"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"vscode": {
+"languageId": "raw"
+}
+},
+"source": [
+"## Import libraries"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import geoai"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"vscode": {
+"languageId": "raw"
+}
+},
+"source": [
+"## Download sample data\n",
+"\n",
+ "We'll use the [Earth Surface Water Dataset](https://zenodo.org/records/5205674#.Y4iEFezP1hE) from Zenodo. Credits to the author (Xin Luo) of the dataset"
62
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"url = \"https://zenodo.org/records/5205674/files/dset-s2.zip?download=1\""
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"data_dir = geoai.download_file(url)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"images_dir = f\"{data_dir}/dset-s2/tra_scene\"\n",
+"masks_dir = f\"{data_dir}/dset-s2/tra_truth\"\n",
+"tiles_dir = f\"{data_dir}/dset-s2/tiles\""
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Create training data\n",
+"\n",
+ "We'll create the same training tiles as before."
100
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"result = geoai.export_geotiff_tiles_batch(\n",
+" images_folder=images_dir,\n",
+" masks_folder=masks_dir,\n",
+" output_folder=tiles_dir,\n",
+" tile_size=512,\n",
+" stride=128,\n",
+" quiet=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {
+"vscode": {
+"languageId": "raw"
+}
+},
+"source": [
+"## Train semantic segmentation model\n",
+"\n",
+"Now we'll train a semantic segmentation model using the new `train_segmentation_model` function. This function supports various architectures from `segmentation-models-pytorch`:\n",
+"\n",
+ "- **Architectures**: `unet`, `unetplusplus` `deeplabv3`, `deeplabv3plus`, `fpn`, `pspnet`, `linknet`, `manet`\n",
+"- **Encoders**: `resnet34`, `resnet50`, `efficientnet-b0`, `mobilenet_v2`, etc.\n",
+"\n",
+"For more details, please refer to the [segmentation-models-pytorch documentation](https://smp.readthedocs.io/en/latest/models.html).\n",
+"\n",
+ "Let's train the module using U-Net with ResNet34 encoder:"
136
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Test train_segmentation_model with automatic size detection\n",
+"geoai.train_segmentation_model(\n",
+" images_dir=f\"{tiles_dir}/images\",\n",
+" labels_dir=f\"{tiles_dir}/masks\",\n",
+" output_dir=f\"{tiles_dir}/unet_models\",\n",
+" architecture=\"unet\",\n",
+" encoder_name=\"resnet34\",\n",
+" encoder_weights=\"imagenet\",\n",
+" num_channels=6,\n",
+" num_classes=2, # background and water\n",
+" batch_size=8,\n",
+" num_epochs=50,\n",
+" learning_rate=0.001,\n",
+" val_split=0.2,\n",
+" verbose=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Evaluate the model\n",
+"\n",
+"Let's examine the training curves and model performance:"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"geoai.plot_performance_metrics(\n",
+" history_path=f\"{tiles_dir}/unet_models/training_history.pth\",\n",
+" figsize=(15, 5),\n",
+" verbose=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"![image](https://github.com/user-attachments/assets/61f675a7-ee67-4650-81c0-f754fe681f4d)"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Run inference"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"images_dir = f\"{data_dir}/dset-s2/val_scene\"\n",
+"masks_dir = f\"{data_dir}/dset-s2/val_truth\"\n",
+"predictions_dir = f\"{data_dir}/dset-s2/predictions\"\n",
+"model_path = f\"{tiles_dir}/unet_models/best_model.pth\""
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"geoai.semantic_segmentation_batch(\n",
+" input_dir=images_dir,\n",
+" output_dir=predictions_dir,\n",
+" model_path=model_path,\n",
+" architecture=\"unet\",\n",
+" encoder_name=\"resnet34\",\n",
+" num_channels=6,\n",
+" num_classes=2,\n",
+" window_size=512,\n",
+" overlap=256,\n",
+" batch_size=8,\n",
+" quiet=True,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"## Visualize results"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"test_image_path = (\n",
+" f\"{data_dir}/dset-s2/val_scene/S2A_L2A_20190318_N0211_R061_6Bands_S2.tif\"\n",
+")\n",
+"ground_truth_path = (\n",
+" f\"{data_dir}/dset-s2/val_truth/S2A_L2A_20190318_N0211_R061_S2_Truth.tif\"\n",
+")\n",
+"prediction_path = (\n",
+" f\"{data_dir}/dset-s2/predictions/S2A_L2A_20190318_N0211_R061_6Bands_S2_mask.tif\"\n",
+")\n",
+"save_path = f\"{data_dir}/dset-s2/S2A_L2A_20190318_N0211_R061_6Bands_S2_comparison.png\"\n",
+"\n",
+"fig = geoai.plot_prediction_comparison(\n",
+" original_image=test_image_path,\n",
+" prediction_image=prediction_path,\n",
+" ground_truth_image=ground_truth_path,\n",
+" titles=[\"Original\", \"Prediction\", \"Ground Truth\"],\n",
+" figsize=(15, 5),\n",
+" save_path=save_path,\n",
+" show_plot=True,\n",
+" indexes=[5, 4, 3],\n",
+" divider=5000,\n",
+")"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"![image](https://github.com/user-attachments/assets/53601ed7-2bd6-4e7e-b369-4d7bfc2ce120)"
+]
+}
+],
+"metadata": {
+"kernelspec": {
+"display_name": "geo",
+"language": "python",
+"name": "python3"
+},
+"language_info": {
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+},
+"file_extension": ".py",
+"mimetype": "text/x-python",
+"name": "python",
+"nbconvert_exporter": "python",
+"pygments_lexer": "ipython3",
+"version": "3.12.2"
+}
+},
+"nbformat": 4,
+"nbformat_minor": 4
+}
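The tiling cell in this notebook slides a 512-pixel window with a 128-pixel stride, so consecutive tiles overlap by 384 pixels. A hedged back-of-the-envelope for the resulting tile count per scene, ignoring any edge-padding behavior `export_geotiff_tiles_batch` may apply (an assumption, since the diff does not show that function):

```python
# Number of window positions along one axis for a sliding window of size
# `tile` moved by `stride` across an image `extent` pixels long.
def n_windows(extent: int, tile: int = 512, stride: int = 128) -> int:
    return max(0, (extent - tile) // stride + 1)

# e.g., a 2048 x 2048 scene yields 13 x 13 = 169 overlapping tiles
print(n_windows(2048) ** 2)
```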
--- geoai_py-0.8.1/geoai/__init__.py
+++ geoai_py-0.8.2/geoai/__init__.py
@@ -2,7 +2,7 @@
 
 __author__ = """Qiusheng Wu"""
 __email__ = "giswqs@gmail.com"
-__version__ = "0.8.1"
+__version__ = "0.8.2"
 
 
 import os
--- geoai_py-0.8.1/geoai/utils.py
+++ geoai_py-0.8.2/geoai/utils.py
@@ -7280,105 +7280,123 @@ def plot_prediction_comparison(
     prediction_colormap: str = "gray",
     ground_truth_colormap: str = "gray",
     original_colormap: Optional[str] = None,
+    indexes: Optional[List[int]] = None,
+    divider: Optional[float] = None,
 ):
-    """
-    Plot original image, prediction image, and optionally ground truth image side by side.
+    """Plot original image, prediction, and optional ground truth side by side.
+
+    Supports input as file paths, NumPy arrays, or PIL Images. For multi-band
+    images, selected channels can be specified via `indexes`. If the image data
+    is not normalized (e.g., Sentinel-2 [0, 10000]), the `divider` can be used
+    to scale values for visualization.
 
     Args:
-        original_image: Original input image (file path, numpy array, or PIL Image)
-        prediction_image: Prediction/segmentation mask (file path, numpy array, or PIL Image)
-        ground_truth_image: Optional ground truth mask (file path, numpy array, or PIL Image)
-        titles: Optional list of titles for each subplot
-        figsize: Figure size tuple (width, height)
-        save_path: Optional path to save the plot
-        show_plot: Whether to display the plot
-        prediction_colormap: Colormap for prediction image
-        ground_truth_colormap: Colormap for ground truth image
-        original_colormap: Colormap for original image (None for RGB)
+        original_image (Union[str, np.ndarray, Image.Image]):
+            Original input image as a file path, NumPy array, or PIL Image.
+        prediction_image (Union[str, np.ndarray, Image.Image]):
+            Predicted segmentation mask image.
+        ground_truth_image (Optional[Union[str, np.ndarray, Image.Image]], optional):
+            Ground truth mask image. Defaults to None.
+        titles (Optional[List[str]], optional):
+            List of titles for the subplots. If not provided, default titles are used.
+        figsize (Tuple[int, int], optional):
+            Size of the entire figure in inches. Defaults to (15, 5).
+        save_path (Optional[str], optional):
+            If specified, saves the figure to this path. Defaults to None.
+        show_plot (bool, optional):
+            Whether to display the figure using plt.show(). Defaults to True.
+        prediction_colormap (str, optional):
+            Colormap to use for the prediction mask. Defaults to "gray".
+        ground_truth_colormap (str, optional):
+            Colormap to use for the ground truth mask. Defaults to "gray".
+        original_colormap (Optional[str], optional):
+            Colormap to use for the original image if it's grayscale. Defaults to None.
+        indexes (Optional[List[int]], optional):
+            List of band/channel indexes (0-based for NumPy, 1-based for rasterio) to extract from the original image.
+            Useful for multi-band imagery like Sentinel-2. Defaults to None.
+        divider (Optional[float], optional):
+            Value to divide the original image by for normalization (e.g., 10000 for reflectance). Defaults to None.
 
     Returns:
-        matplotlib.figure.Figure: The figure object
+        matplotlib.figure.Figure:
+            The generated matplotlib figure object.
     """
 
-    def _load_image(img_input):
+    def _load_image(img_input, indexes=None):
         """Helper function to load image from various input types."""
         if isinstance(img_input, str):
-            # File path
             if img_input.lower().endswith((".tif", ".tiff")):
-                # Handle GeoTIFF files
                 with rasterio.open(img_input) as src:
-                    img = src.read()
-                    if img.shape[0] == 1:
-                        # Single band
-                        img = img[0]
+                    if indexes:
+                        img = src.read(indexes)  # 1-based
+                        img = (
+                            np.transpose(img, (1, 2, 0)) if len(indexes) > 1 else img[0]
+                        )
                     else:
-                        # Multi-band, transpose to (H, W, C)
-                        img = np.transpose(img, (1, 2, 0))
+                        img = src.read()
+                        if img.shape[0] == 1:
+                            img = img[0]
+                        else:
+                            img = np.transpose(img, (1, 2, 0))
             else:
-                # Regular image file
                 img = np.array(Image.open(img_input))
         elif isinstance(img_input, Image.Image):
-            # PIL Image
            img = np.array(img_input)
         elif isinstance(img_input, np.ndarray):
-            # NumPy array
             img = img_input
+            if indexes is not None and img.ndim == 3:
+                img = img[:, :, indexes]
         else:
             raise ValueError(f"Unsupported image type: {type(img_input)}")
-
         return img
 
     # Load images
-    original = _load_image(original_image)
+    original = _load_image(original_image, indexes=indexes)
     prediction = _load_image(prediction_image)
     ground_truth = (
         _load_image(ground_truth_image) if ground_truth_image is not None else None
     )
 
-    # Determine number of subplots
-    num_plots = 3 if ground_truth is not None else 2
+    # Apply divider normalization if requested
+    if divider is not None and isinstance(original, np.ndarray) and original.ndim == 3:
+        original = np.clip(original.astype(np.float32) / divider, 0, 1)
 
-    # Create figure and subplots
+    # Determine layout
+    num_plots = 3 if ground_truth is not None else 2
     fig, axes = plt.subplots(1, num_plots, figsize=figsize)
     if num_plots == 2:
         axes = [axes[0], axes[1]]
 
-    # Default titles
     if titles is None:
         titles = ["Original Image", "Prediction"]
         if ground_truth is not None:
             titles.append("Ground Truth")
 
-    # Plot original image
-    if len(original.shape) == 3 and original.shape[2] in [3, 4]:
-        # RGB or RGBA image
+    # Plot original
+    if original.ndim == 3 and original.shape[2] in [3, 4]:
         axes[0].imshow(original)
     else:
-        # Grayscale or single channel
         axes[0].imshow(original, cmap=original_colormap)
     axes[0].set_title(titles[0])
     axes[0].axis("off")
 
-    # Plot prediction image
+    # Prediction
     axes[1].imshow(prediction, cmap=prediction_colormap)
     axes[1].set_title(titles[1])
     axes[1].axis("off")
 
-    # Plot ground truth if provided
+    # Ground truth
     if ground_truth is not None:
         axes[2].imshow(ground_truth, cmap=ground_truth_colormap)
         axes[2].set_title(titles[2])
         axes[2].axis("off")
 
-    # Adjust layout
     plt.tight_layout()
 
-    # Save if requested
     if save_path:
         plt.savefig(save_path, dpi=300, bbox_inches="tight")
         print(f"Plot saved to: {save_path}")
 
-    # Show plot
     if show_plot:
         plt.show()
 
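To make the two new parameters concrete: in the Sentinel-2 notebook above, `indexes=[5, 4, 3]` picks three display bands out of the 6-band GeoTIFF (1-based, per rasterio) and `divider=5000` rescales raw reflectance into [0, 1]. A minimal sketch of the equivalent manual steps, mirroring `_load_image`; the file name comes from the notebook, and the exact band-to-color mapping is an assumption about the dataset's layout:

```python
import numpy as np
import rasterio

with rasterio.open("S2A_L2A_20190318_N0211_R061_6Bands_S2.tif") as src:
    rgb = src.read([5, 4, 3])  # rasterio band indexes are 1-based

rgb = np.transpose(rgb, (1, 2, 0))  # (bands, H, W) -> (H, W, bands)
rgb = np.clip(rgb.astype(np.float32) / 5000, 0, 1)  # scale reflectance for display
```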
--- geoai_py-0.8.1/geoai_py.egg-info/PKG-INFO
+++ geoai_py-0.8.2/geoai_py.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: geoai-py
-Version: 0.8.1
+Version: 0.8.2
 Summary: A Python package for using Artificial Intelligence (AI) with geospatial data
 Author-email: Qiusheng Wu <giswqs@gmail.com>
 License: MIT License
--- geoai_py-0.8.1/geoai_py.egg-info/SOURCES.txt
+++ geoai_py-0.8.2/geoai_py.egg-info/SOURCES.txt
@@ -80,6 +80,7 @@ docs/examples/train_solar_panel_detection.ipynb
 docs/examples/train_water_detection.ipynb
 docs/examples/view_metadata.ipynb
 docs/examples/water_detection.ipynb
+docs/examples/water_detection_s2.ipynb
 docs/examples/water_dynamics.ipynb
 docs/examples/wetland_mapping.ipynb
 docs/examples/dataviz/lidar_viz.ipynb
--- geoai_py-0.8.1/mkdocs.yml
+++ geoai_py-0.8.2/mkdocs.yml
@@ -137,6 +137,7 @@ nav:
 - examples/grounded_sam.ipynb
 - examples/load_model_checkpoint.ipynb
 - examples/water_detection.ipynb
+- examples/water_detection_s2.ipynb
 - examples/batch_segmentation.ipynb
 - examples/building_detection_lidar.ipynb
 # - examples/samgeo/satellite.ipynb
--- geoai_py-0.8.1/pyproject.toml
+++ geoai_py-0.8.2/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "geoai-py"
-version = "0.8.1"
+version = "0.8.2"
 dynamic = [
     "dependencies",
 ]
@@ -43,7 +43,7 @@ universal = true
 
 
 [tool.bumpversion]
-current_version = "0.8.1"
+current_version = "0.8.2"
 commit = true
 tag = true
 