geomind-ai 1.0.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- geomind/__init__.py +3 -3
- geomind/agent.py +127 -127
- geomind/cli.py +12 -14
- geomind/tools/geocoding.py +21 -18
- geomind/tools/processing.py +80 -82
- geomind/tools/stac_search.py +35 -33
- {geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/METADATA +10 -17
- geomind_ai-1.0.1.dist-info/RECORD +14 -0
- geomind_ai-1.0.0.dist-info/RECORD +0 -14
- {geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/WHEEL +0 -0
- {geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/entry_points.txt +0 -0
- {geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/licenses/LICENSE +0 -0
- {geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/top_level.txt +0 -0
geomind/__init__.py
CHANGED

@@ -3,9 +3,9 @@ GeoMind - Geospatial AI Agent
 """
 
-__version__ = "1.0.0"
-__author__ = "Harsh Shinde"
+__version__ = "1.0.1"
+__author__ = "Harsh Shinde, Rajat Shinde"
 
 from .agent import GeoMindAgent
 
-__all__ = ["GeoMindAgent"]
+__all__ = ["GeoMindAgent"]
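The package root keeps exporting `GeoMindAgent`, so the entry point is unchanged for callers of 1.0.1. A minimal usage sketch (not part of the diff; it assumes `OPENROUTER_API_KEY` is configured in the environment or a `.env` file, which is what the new error message in `agent.py` expects, and the query string is illustrative):

```python
# Minimal usage sketch for geomind-ai 1.0.1 (illustrative, not from the diff).
# Assumes OPENROUTER_API_KEY is set in the environment or a .env file.
from geomind import GeoMindAgent

agent = GeoMindAgent()  # model defaults to OPENROUTER_MODEL from geomind.config
reply = agent.chat("Find recent Sentinel-2 imagery of Paris with low cloud cover")
print(reply)
```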
geomind/agent.py
CHANGED

@@ -6,12 +6,15 @@ queries about satellite imagery and execute the appropriate tools.
 """
 
 import json
-
+import re
+from typing import Optional, Callable, Any
 from datetime import datetime
 
 from openai import OpenAI
 
-from .config import
+from .config import (
+OPENROUTER_API_KEY, OPENROUTER_API_URL, OPENROUTER_MODEL
+)
 from .tools import (
 geocode_location,
 get_bbox_from_location,

@@ -48,12 +51,12 @@ TOOLS = [
 "properties": {
 "place_name": {
 "type": "string",
-"description": "The name of the place to geocode (e.g., 'New York City', 'Paris, France')"
+"description": "The name of the place to geocode (e.g., 'New York City', 'Paris, France')"
 }
 },
-"required": ["place_name"]
-}
-}
+"required": ["place_name"]
+}
+}
 },
 {
 "type": "function",

@@ -65,16 +68,16 @@ TOOLS = [
 "properties": {
 "place_name": {
 "type": "string",
-"description": "The name of the place"
+"description": "The name of the place"
 },
 "buffer_km": {
 "type": "number",
-"description": "Buffer distance in kilometers (default: 10)"
-}
+"description": "Buffer distance in kilometers (default: 10)"
+}
 },
-"required": ["place_name"]
-}
-}
+"required": ["place_name"]
+}
+}
 },
 {
 "type": "function",

@@ -87,28 +90,28 @@ TOOLS = [
 "bbox": {
 "type": "array",
 "items": {"type": "number"},
-"description": "Bounding box as [min_lon, min_lat, max_lon, max_lat]"
+"description": "Bounding box as [min_lon, min_lat, max_lon, max_lat]"
 },
 "start_date": {
 "type": "string",
-"description": "Start date in YYYY-MM-DD format"
+"description": "Start date in YYYY-MM-DD format"
 },
 "end_date": {
 "type": "string",
-"description": "End date in YYYY-MM-DD format"
+"description": "End date in YYYY-MM-DD format"
 },
 "max_cloud_cover": {
 "type": "number",
-"description": "Maximum cloud cover percentage (0-100)"
+"description": "Maximum cloud cover percentage (0-100)"
 },
 "max_items": {
 "type": "integer",
-"description": "Maximum number of results"
-}
+"description": "Maximum number of results"
+}
 },
-"required": []
-}
-}
+"required": []
+}
+}
 },
 {
 "type": "function",

@@ -120,24 +123,24 @@ TOOLS = [
 "properties": {
 "location_name": {
 "type": "string",
-"description": "Name of the location to search"
+"description": "Name of the location to search"
 },
 "days": {
 "type": "integer",
-"description": "Number of days to look back (default: 7)"
+"description": "Number of days to look back (default: 7)"
 },
 "max_cloud_cover": {
 "type": "number",
-"description": "Maximum cloud cover percentage"
+"description": "Maximum cloud cover percentage"
 },
 "max_items": {
 "type": "integer",
-"description": "Maximum number of results"
-}
+"description": "Maximum number of results"
+}
 },
-"required": []
-}
-}
+"required": []
+}
+}
 },
 {
 "type": "function",

@@ -147,11 +150,14 @@ TOOLS = [
 "parameters": {
 "type": "object",
 "properties": {
-"item_id": {
+"item_id": {
+"type": "string",
+"description": "The STAC item ID"
+}
 },
-"required": ["item_id"]
-}
-}
+"required": ["item_id"]
+}
+}
 },
 {
 "type": "function",

@@ -163,20 +169,20 @@ TOOLS = [
 "properties": {
 "zarr_url": {
 "type": "string",
-"description": "URL to the SR_10m Zarr asset from a STAC item"
+"description": "URL to the SR_10m Zarr asset from a STAC item"
 },
 "output_path": {
 "type": "string",
-"description": "Optional path to save the output image"
+"description": "Optional path to save the output image"
 },
 "subset_size": {
 "type": "integer",
-"description": "Size to subset the image (default: 1000 pixels)"
-}
+"description": "Size to subset the image (default: 1000 pixels)"
+}
 },
-"required": ["zarr_url"]
-}
-}
+"required": ["zarr_url"]
+}
+}
 },
 {
 "type": "function",

@@ -188,20 +194,20 @@ TOOLS = [
 "properties": {
 "zarr_url": {
 "type": "string",
-"description": "URL to the SR_10m Zarr asset"
+"description": "URL to the SR_10m Zarr asset"
 },
 "output_path": {
 "type": "string",
-"description": "Optional path to save the NDVI image"
+"description": "Optional path to save the NDVI image"
 },
 "subset_size": {
 "type": "integer",
-"description": "Size to subset the image"
-}
+"description": "Size to subset the image"
+}
 },
-"required": ["zarr_url"]
-}
-}
+"required": ["zarr_url"]
+}
+}
 },
 {
 "type": "function",

@@ -213,67 +219,65 @@ TOOLS = [
 "properties": {
 "zarr_url": {
 "type": "string",
-"description": "URL to the Zarr asset"
+"description": "URL to the Zarr asset"
 },
 "bands": {
 "type": "array",
 "items": {"type": "string"},
-"description": "List of band names to analyze"
-}
+"description": "List of band names to analyze"
+}
 },
-"required": ["zarr_url"]
-}
-}
-}
+"required": ["zarr_url"]
+}
+}
+}
 ]
 
 
 class GeoMindAgent:
 """
 GeoMind - An AI agent for geospatial analysis with Sentinel-2 imagery.
-
+
 Uses OpenRouter API for access to multiple AI models.
 """
-
-def __init__(self, model: Optional[str] = None
+
+def __init__(self, model: Optional[str] = None):
 """
 Initialize the GeoMind agent.
-
+
 Args:
 model: Model name (default: xiaomi/mimo-v2-flash:free)
-api_key: OpenRouter API key. If not provided, looks for OPENROUTER_API_KEY env variable.
 """
 self.provider = "openrouter"
-self.api_key =
+self.api_key = OPENROUTER_API_KEY
 self.model_name = model or OPENROUTER_MODEL
 self.base_url = OPENROUTER_API_URL
-
+
 if not self.api_key:
 raise ValueError(
-"OpenRouter API key required.\n"
-"
-"1. Pass it to the constructor: GeoMindAgent(api_key='your-key')\n"
-"2. Set OPENROUTER_API_KEY environment variable\n"
-"3. Create a .env file with OPENROUTER_API_KEY=your-key\n"
-"\nGet your API key at: https://openrouter.ai/settings/keys"
+"OpenRouter API key required. Set OPENROUTER_API_KEY in .env file.\n"
+"Get your API key at: https://openrouter.ai/settings/keys"
 )
-
+
 print(f"🚀 GeoMind Agent initialized with {self.model_name} (OpenRouter)")
 print(f" API URL: {self.base_url}")
-
+
 # Create OpenAI-compatible client
-self.client = OpenAI(
+self.client = OpenAI(
+base_url=self.base_url,
+api_key=self.api_key
+)
+
 # Chat history
 self.history = []
-
+
 # Add system message
 self.system_prompt = self._get_system_prompt()
-
+
 def _get_system_prompt(self) -> str:
 """Get the system prompt for the agent."""
-return f"""You are GeoMind, an expert AI assistant specialized in geospatial analysis
-and satellite imagery. You help users find, analyze, and visualize Sentinel-2 satellite data
+return f"""You are GeoMind, an expert AI assistant specialized in geospatial analysis
+and satellite imagery. You help users find, analyze, and visualize Sentinel-2 satellite data
 from the EOPF (ESA Earth Observation Processing Framework) catalog.
 
 Your capabilities include:

@@ -294,20 +298,20 @@ When users ask for imagery:
 3. Offer to create visualizations if data is found
 
 Always explain what you're doing and interpret results in a helpful way."""
-
+
 def _execute_function(self, name: str, args: dict) -> dict:
 """Execute a function call and return the result."""
 print(f" 🔧 Executing: {name}({args})")
-
+
 if name not in TOOL_FUNCTIONS:
 return {"error": f"Unknown function: {name}"}
-
+
 try:
 result = TOOL_FUNCTIONS[name](**args)
 return result
 except Exception as e:
 return {"error": str(e)}
-
+
 def chat(self, message: str, verbose: bool = True) -> str:
 """
 Send a message to the agent and get a response.

@@ -315,19 +319,19 @@ Always explain what you're doing and interpret results in a helpful way."""
 if verbose:
 print(f"\n💬 User: {message}")
 print("🤔 Processing...")
-
+
 # Add user message to history
 self.history.append({"role": "user", "content": message})
-
+
 # Build messages with system prompt
 messages = [{"role": "system", "content": self.system_prompt}] + self.history
-
+
 max_iterations = 10
 iteration = 0
-
+
 while iteration < max_iterations:
 iteration += 1
-
+
 # Call the model
 response = self.client.chat.completions.create(
 model=self.model_name,

@@ -336,59 +340,55 @@ Always explain what you're doing and interpret results in a helpful way."""
 tool_choice="auto",
 max_tokens=4096,
 )
-
+
 assistant_message = response.choices[0].message
-
+
 # Check if there are tool calls
 if assistant_message.tool_calls:
 # Add assistant message with tool calls to messages
-messages.append(
-"
-"arguments": tc.function.arguments,
-},
+messages.append({
+"role": "assistant",
+"content": assistant_message.content or "",
+"tool_calls": [
+{
+"id": tc.id,
+"type": "function",
+"function": {
+"name": tc.function.name,
+"arguments": tc.function.arguments
 }
-)
+}
+for tc in assistant_message.tool_calls
+]
+})
+
 # Execute each tool call
 for tool_call in assistant_message.tool_calls:
 func_name = tool_call.function.name
 func_args = json.loads(tool_call.function.arguments)
-
+
 result = self._execute_function(func_name, func_args)
-
+
 # Add tool result to messages
-messages.append(
-}
-)
+messages.append({
+"role": "tool",
+"tool_call_id": tool_call.id,
+"content": json.dumps(result, default=str)
+})
 else:
 # No tool calls, we have a final response
 final_text = assistant_message.content or ""
-
+
 # Add to history
 self.history.append({"role": "assistant", "content": final_text})
-
+
 if verbose:
 print(f"\n🌍 GeoMind: {final_text}")
-
+
 return final_text
-
+
 return "Max iterations reached."
-
+
 def reset(self):
 """Reset the chat session."""
 self.history = []

@@ -398,7 +398,7 @@ Always explain what you're doing and interpret results in a helpful way."""
 def main(model: Optional[str] = None):
 """Main entry point for CLI usage."""
 import sys
-
+
 print("=" * 60)
 print("🌍 GeoMind - Geospatial AI Agent")
 print("=" * 60)

@@ -406,7 +406,7 @@ def main(model: Optional[str] = None):
 print("Type 'quit' or 'exit' to end the session")
 print("Type 'reset' to start a new conversation")
 print("=" * 60)
-
+
 try:
 agent = GeoMindAgent(model=model)
 except ValueError as e:

@@ -416,24 +416,24 @@ def main(model: Optional[str] = None):
 print(f"\n❌ Error: {e}")
 print("\nPlease check your API key and internet connection.")
 sys.exit(1)
-
+
 while True:
 try:
 user_input = input("\n💬 You: ").strip()
-
+
 if not user_input:
 continue
-
-if user_input.lower() in [
+
+if user_input.lower() in ['quit', 'exit', 'q']:
 print("\n👋 Goodbye!")
 break
-
-if user_input.lower() ==
+
+if user_input.lower() == 'reset':
 agent.reset()
 continue
-
+
 agent.chat(user_input)
-
+
 except KeyboardInterrupt:
 print("\n\n👋 Goodbye!")
 break
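The reworked block in `chat()` serializes the assistant's tool calls back into the message list and attaches one `"tool"` message per call. A condensed, self-contained sketch of that pattern, assuming `assistant_message` comes from an OpenAI-compatible chat completion; `append_tool_round` and `results_by_call_id` are illustrative names, not part of the package API:

```python
# Sketch of the tool-call round trip that agent.py 1.0.1 builds explicitly.
import json

def append_tool_round(messages, assistant_message, results_by_call_id):
    """Echo the assistant's tool calls, then attach one "tool" message per call."""
    messages.append({
        "role": "assistant",
        "content": assistant_message.content or "",
        "tool_calls": [
            {
                "id": tc.id,
                "type": "function",
                "function": {
                    "name": tc.function.name,
                    "arguments": tc.function.arguments,
                },
            }
            for tc in assistant_message.tool_calls
        ],
    })
    for tc in assistant_message.tool_calls:
        messages.append({
            "role": "tool",
            "tool_call_id": tc.id,
            # Tool results are JSON-encoded, with default=str for non-serializable values.
            "content": json.dumps(results_by_call_id[tc.id], default=str),
        })
    return messages
```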
geomind/cli.py
CHANGED

@@ -32,36 +32,34 @@ Environment Variables:
 OPENROUTER_API_KEY Your OpenRouter API key
 OPENROUTER_MODEL Model to use (default: xiaomi/mimo-v2-flash:free)
 OPENROUTER_API_URL API endpoint (default: https://openrouter.ai/api/v1)
-"""
+"""
 )
 
 parser.add_argument(
-"--query",
-"-q",
+"--query", "-q",
 type=str,
-help="Single query to run (if not provided, starts interactive mode)"
+help="Single query to run (if not provided, starts interactive mode)"
 )
 parser.add_argument(
-"--model",
-"-m",
+"--model", "-m",
 type=str,
-help="Model name to use (e.g., 'anthropic/claude-3.5-sonnet')"
+help="Model name to use (e.g., 'anthropic/claude-3.5-sonnet')"
 )
 parser.add_argument(
-"--api-key",
-"-k",
+"--api-key", "-k",
 type=str,
-help="OpenRouter API key (or set OPENROUTER_API_KEY env variable)"
+help="OpenRouter API key (or set OPENROUTER_API_KEY env variable)"
 )
 parser.add_argument(
-"--version", "-v",
+"--version", "-v",
+action="store_true",
+help="Show version and exit"
 )
 
 args = parser.parse_args()
 
 if args.version:
 from . import __version__
-
 print(f"GeoMind version {__version__}")
 sys.exit(0)
 

@@ -104,11 +102,11 @@ def run_interactive(model: Optional[str] = None, api_key: Optional[str] = None):
 if not user_input:
 continue
 
-if user_input.lower() in [
+if user_input.lower() in ['quit', 'exit', 'q']:
 print("\n👋 Goodbye!")
 break
 
-if user_input.lower() ==
+if user_input.lower() == 'reset':
 agent.reset()
 continue
 
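For reference, the CLI surface after these hunks reduces to the argparse setup below; the flag names, help strings, and the `store_true` version flag mirror the diff, while the sample argument list is made up:

```python
# Sketch of the geomind CLI argument parser as of 1.0.1 (illustrative).
import argparse

parser = argparse.ArgumentParser(prog="geomind")
parser.add_argument("--query", "-q", type=str,
                    help="Single query to run (if not provided, starts interactive mode)")
parser.add_argument("--model", "-m", type=str,
                    help="Model name to use (e.g., 'anthropic/claude-3.5-sonnet')")
parser.add_argument("--api-key", "-k", type=str,
                    help="OpenRouter API key (or set OPENROUTER_API_KEY env variable)")
parser.add_argument("--version", "-v", action="store_true", help="Show version and exit")

# Made-up invocation to show what gets parsed.
args = parser.parse_args(["--query", "Find recent imagery of Paris"])
print(args.query, args.version)  # -> Find recent imagery of Paris False
```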
geomind/tools/geocoding.py
CHANGED

@@ -14,22 +14,22 @@ from ..config import GEOCODER_USER_AGENT, DEFAULT_BUFFER_KM
 def geocode_location(place_name: str) -> dict:
 """
 Convert a place name to geographic coordinates.
-
+
 Args:
 place_name: Name of the location (e.g., "New York", "Paris, France")
-
+
 Returns:
 Dictionary with latitude, longitude, and full address
-
+
 Example:
 >>> geocode_location("Central Park, New York")
 {'latitude': 40.7828, 'longitude': -73.9653, 'address': '...'}
 """
 geolocator = Nominatim(user_agent=GEOCODER_USER_AGENT, timeout=10)
 geocode = RateLimiter(geolocator.geocode, min_delay_seconds=1)
-
+
 location = geocode(place_name)
-
+
 if location is None:
 return {
 "success": False,

@@ -38,7 +38,7 @@ def geocode_location(place_name: str) -> dict:
 "longitude": None,
 "address": None,
 }
-
+
 return {
 "success": True,
 "latitude": location.latitude,

@@ -47,55 +47,58 @@ def geocode_location(place_name: str) -> dict:
 }
 
 
-def get_bbox_from_location(
+def get_bbox_from_location(
+place_name: str,
+buffer_km: Optional[float] = None
+) -> dict:
 """
 Convert a place name to a bounding box suitable for STAC queries.
-
+
 Creates a square bounding box centered on the location with the
 specified buffer distance.
-
+
 Args:
 place_name: Name of the location (e.g., "San Francisco")
 buffer_km: Buffer distance in kilometers (default: 10km)
-
+
 Returns:
 Dictionary with bbox [min_lon, min_lat, max_lon, max_lat] and center point
-
+
 Example:
 >>> get_bbox_from_location("London", buffer_km=5)
 {'bbox': [-0.17, 51.46, -0.08, 51.55], 'center': {...}}
 """
 if buffer_km is None:
 buffer_km = DEFAULT_BUFFER_KM
-
+
 # Get coordinates
 location_result = geocode_location(place_name)
-
+
 if not location_result["success"]:
 return {
 "success": False,
 "error": location_result["error"],
 "bbox": None,
 }
-
+
 lat = location_result["latitude"]
 lon = location_result["longitude"]
-
+
 # Calculate approximate degree offset
 # 1 degree latitude ≈ 111 km
 # 1 degree longitude ≈ 111 * cos(latitude) km
 import math
-
+
 lat_offset = buffer_km / 111.0
 lon_offset = buffer_km / (111.0 * math.cos(math.radians(lat)))
-
+
 bbox = [
 lon - lon_offset,  # min_lon (west)
 lat - lat_offset,  # min_lat (south)
 lon + lon_offset,  # max_lon (east)
 lat + lat_offset,  # max_lat (north)
 ]
-
+
 return {
 "success": True,
 "bbox": bbox,
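The bounding-box helper above relies on simple planar approximations: one degree of latitude is roughly 111 km, and a degree of longitude shrinks by cos(latitude). A self-contained worked example of the same math (the Paris coordinates and 10 km buffer are illustrative inputs, not values from the package):

```python
# Worked example of the degree-offset math in get_bbox_from_location.
import math

lat, lon, buffer_km = 48.8566, 2.3522, 10.0          # illustrative center point
lat_offset = buffer_km / 111.0                        # ~0.09 degrees of latitude
lon_offset = buffer_km / (111.0 * math.cos(math.radians(lat)))  # wider at high latitudes
bbox = [lon - lon_offset, lat - lat_offset, lon + lon_offset, lat + lat_offset]
print([round(v, 4) for v in bbox])  # [min_lon, min_lat, max_lon, max_lat]
```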
geomind/tools/processing.py
CHANGED

@@ -4,13 +4,14 @@ Image processing tools for Sentinel-2 data.
 Handles loading Zarr data, applying corrections, and creating visualizations.
 """
 
-from typing import Optional, List
+from typing import Optional, List, Tuple
 from pathlib import Path
 import numpy as np
 
 from ..config import (
 REFLECTANCE_SCALE,
 REFLECTANCE_OFFSET,
+RGB_BANDS,
 OUTPUT_DIR,
 )
 

@@ -23,27 +24,27 @@ def _apply_scale_offset(
 ) -> np.ndarray:
 """
 Apply scale and offset to convert DN to surface reflectance.
-
+
 Formula: reflectance = (DN * scale) + offset
-
+
 Args:
 data: Raw digital number values
 scale: Scale factor (default: 0.0001)
 offset: Offset value (default: -0.1)
 nodata: NoData value to mask (default: 0)
-
+
 Returns:
 Surface reflectance values
 """
 # Create mask for nodata
 mask = data == nodata
-
+
 # Apply transformation
 result = (data.astype(np.float32) * scale) + offset
-
+
 # Set nodata pixels to NaN
 result[mask] = np.nan
-
+
 return result
 
 

@@ -54,37 +55,37 @@ def _normalize_for_display(
 ) -> np.ndarray:
 """
 Normalize data to 0-1 range for display using percentile stretch.
-
+
 Args:
 data: Input array
 percentile_low: Lower percentile for clipping
 percentile_high: Upper percentile for clipping
-
+
 Returns:
 Normalized array in 0-1 range
 """
 # Get valid (non-NaN) values
 valid = data[~np.isnan(data)]
-
+
 if len(valid) == 0:
 return np.zeros_like(data)
-
+
 # Calculate percentiles
 low = np.percentile(valid, percentile_low)
 high = np.percentile(valid, percentile_high)
-
+
 # Normalize
 if high > low:
 result = (data - low) / (high - low)
 else:
 result = np.zeros_like(data)
-
+
 # Clip to 0-1
 result = np.clip(result, 0, 1)
-
+
 # Set NaN to 0 for display
 result = np.nan_to_num(result, nan=0)
-
+
 return result
 
 

@@ -95,81 +96,78 @@ def create_rgb_composite(
 ) -> dict:
 """
 Create an RGB composite image from Sentinel-2 10m bands.
-
+
 Uses B04 (Red), B03 (Green), B02 (Blue) bands.
-
+
 Args:
 zarr_url: URL to the SR_10m Zarr asset
 output_path: Optional path to save the image
 subset_size: Size to subset the image (for faster processing)
-
+
 Returns:
 Dictionary with path to saved image and metadata
 """
 try:
+import xarray as xr
 import matplotlib.pyplot as plt
 import zarr
-
+
 # Open the Zarr store
 # The SR_10m asset contains b02, b03, b04, b08
-store = zarr.open(zarr_url, mode=
-
+store = zarr.open(zarr_url, mode='r')
+
 # Read the bands
 # Note: Band names are lowercase in the Zarr structure
-red = np.array(store[
-green = np.array(store[
-blue = np.array(store[
-
+red = np.array(store['b04'])
+green = np.array(store['b03'])
+blue = np.array(store['b02'])
+
 # Subset if requested (for faster processing)
 if subset_size and red.shape[0] > subset_size:
 # Take center subset
 h, w = red.shape
 start_h = (h - subset_size) // 2
 start_w = (w - subset_size) // 2
-red = red[start_h
-green = green[
-blue = blue[
-start_h : start_h + subset_size, start_w : start_w + subset_size
-]
-
+red = red[start_h:start_h+subset_size, start_w:start_w+subset_size]
+green = green[start_h:start_h+subset_size, start_w:start_w+subset_size]
+blue = blue[start_h:start_h+subset_size, start_w:start_w+subset_size]
+
 # Apply scale and offset
 red = _apply_scale_offset(red)
 green = _apply_scale_offset(green)
 blue = _apply_scale_offset(blue)
-
+
 # Normalize for display
 red = _normalize_for_display(red)
 green = _normalize_for_display(green)
 blue = _normalize_for_display(blue)
-
+
 # Stack into RGB
 rgb = np.dstack([red, green, blue])
-
+
 # Generate output path
 if output_path is None:
 output_path = OUTPUT_DIR / f"rgb_composite_{np.random.randint(10000)}.png"
 else:
 output_path = Path(output_path)
-
+
 # Create figure
 fig, ax = plt.subplots(figsize=(10, 10))
 ax.imshow(rgb)
 ax.set_title("Sentinel-2 RGB Composite (B4/B3/B2)")
-ax.axis(
-
+ax.axis('off')
+
 # Save
-plt.savefig(output_path, dpi=150, bbox_inches=
+plt.savefig(output_path, dpi=150, bbox_inches='tight', pad_inches=0.1)
 plt.close(fig)
-
+
 return {
 "success": True,
 "output_path": str(output_path),
 "image_size": rgb.shape[:2],
 "bands_used": ["B04 (Red)", "B03 (Green)", "B02 (Blue)"],
 }
-
+
 except Exception as e:
 return {
 "success": False,

@@ -184,15 +182,15 @@ def calculate_ndvi(
 ) -> dict:
 """
 Calculate NDVI (Normalized Difference Vegetation Index) from Sentinel-2 data.
-
+
 NDVI = (NIR - Red) / (NIR + Red)
 Uses B08 (NIR) and B04 (Red) bands.
-
+
 Args:
 zarr_url: URL to the SR_10m Zarr asset
 output_path: Optional path to save the NDVI image
 subset_size: Size to subset the image
-
+
 Returns:
 Dictionary with NDVI statistics and output path
 """

@@ -200,33 +198,33 @@ def calculate_ndvi(
 import zarr
 import matplotlib.pyplot as plt
 from matplotlib.colors import LinearSegmentedColormap
-
+
 # Open the Zarr store
-store = zarr.open(zarr_url, mode=
-
+store = zarr.open(zarr_url, mode='r')
+
 # Read the bands
-nir = np.array(store[
-red = np.array(store[
-
+nir = np.array(store['b08'])  # NIR
+red = np.array(store['b04'])  # Red
+
 # Subset if requested
 if subset_size and nir.shape[0] > subset_size:
 h, w = nir.shape
 start_h = (h - subset_size) // 2
 start_w = (w - subset_size) // 2
-nir = nir[start_h
-red = red[start_h
-
+nir = nir[start_h:start_h+subset_size, start_w:start_w+subset_size]
+red = red[start_h:start_h+subset_size, start_w:start_w+subset_size]
+
 # Apply scale and offset
 nir = _apply_scale_offset(nir)
 red = _apply_scale_offset(red)
-
+
 # Calculate NDVI
 # Avoid division by zero
 denominator = nir + red
 denominator[denominator == 0] = np.nan
-
+
 ndvi = (nir - red) / denominator
-
+
 # NDVI statistics
 valid_ndvi = ndvi[~np.isnan(ndvi)]
 stats = {

@@ -235,38 +233,38 @@ def calculate_ndvi(
 "mean": float(np.mean(valid_ndvi)) if len(valid_ndvi) > 0 else None,
 "std": float(np.std(valid_ndvi)) if len(valid_ndvi) > 0 else None,
 }
-
+
 # Generate output path
 if output_path is None:
 output_path = OUTPUT_DIR / f"ndvi_{np.random.randint(10000)}.png"
 else:
 output_path = Path(output_path)
-
+
 # Create NDVI colormap (brown -> yellow -> green)
-colors = [
-ndvi_cmap = LinearSegmentedColormap.from_list(
-
+colors = ['#8B4513', '#D2691E', '#FFD700', '#ADFF2F', '#228B22', '#006400']
+ndvi_cmap = LinearSegmentedColormap.from_list('ndvi', colors)
+
 # Create figure
 fig, ax = plt.subplots(figsize=(10, 10))
 im = ax.imshow(ndvi, cmap=ndvi_cmap, vmin=-1, vmax=1)
 ax.set_title("NDVI - Normalized Difference Vegetation Index")
-ax.axis(
-
+ax.axis('off')
+
 # Add colorbar
 cbar = plt.colorbar(im, ax=ax, shrink=0.8)
-cbar.set_label(
-
+cbar.set_label('NDVI')
+
 # Save
-plt.savefig(output_path, dpi=150, bbox_inches=
+plt.savefig(output_path, dpi=150, bbox_inches='tight', pad_inches=0.1)
 plt.close(fig)
-
+
 return {
 "success": True,
 "output_path": str(output_path),
 "statistics": stats,
 "interpretation": _interpret_ndvi(stats["mean"]) if stats["mean"] else None,
 }
-
+
 except Exception as e:
 return {
 "success": False,

@@ -296,36 +294,36 @@ def get_band_statistics(
 ) -> dict:
 """
 Get statistics for specified bands from a Sentinel-2 Zarr asset.
-
+
 Args:
 zarr_url: URL to the Zarr asset (e.g., SR_10m)
 bands: List of band names (default: all available)
-
+
 Returns:
 Dictionary with statistics for each band
 """
 try:
 import zarr
-
-store = zarr.open(zarr_url, mode=
-
+
+store = zarr.open(zarr_url, mode='r')
+
 # Get available bands if not specified
 if bands is None:
-bands = [key for key in store.keys() if key.startswith(
-
+bands = [key for key in store.keys() if key.startswith('b')]
+
 results = {}
-
+
 for band in bands:
 if band not in store:
 results[band] = {"error": "Band not found"}
 continue
-
+
 data = np.array(store[band])
-
+
 # Apply scale/offset
 data = _apply_scale_offset(data)
 valid = data[~np.isnan(data)]
-
+
 if len(valid) > 0:
 results[band] = {
 "min": float(np.min(valid)),

@@ -336,12 +334,12 @@ def get_band_statistics(
 }
 else:
 results[band] = {"error": "No valid data"}
-
+
 return {
 "success": True,
 "band_statistics": results,
 }
-
+
 except Exception as e:
 return {
 "success": False,
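The processing hunks above pin down the constants that matter for interpretation: reflectance = DN * 0.0001 - 0.1 with a digital number of zero treated as nodata, and NDVI = (NIR - Red) / (NIR + Red) computed from the `b08` and `b04` bands. A tiny synthetic check of those formulas (the DN values are invented for illustration):

```python
# Synthetic check of the reflectance and NDVI formulas used in processing.py.
import numpy as np

def apply_scale_offset(dn, scale=0.0001, offset=-0.1, nodata=0):
    """Convert digital numbers to reflectance; mask nodata as NaN."""
    out = dn.astype(np.float32) * scale + offset
    out[dn == nodata] = np.nan
    return out

red_dn = np.array([[0, 2000], [3000, 4000]])   # made-up digital numbers
nir_dn = np.array([[0, 6000], [5000, 8000]])
red, nir = apply_scale_offset(red_dn), apply_scale_offset(nir_dn)

denominator = nir + red
denominator[denominator == 0] = np.nan          # avoid division by zero
ndvi = (nir - red) / denominator
print(np.round(ndvi, 3))  # nodata pixel stays NaN; vegetated-looking pixels come out positive
```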
geomind/tools/stac_search.py
CHANGED

@@ -24,7 +24,7 @@ def _get_stac_client() -> Client:
 def _format_item(item) -> dict:
 """Format a STAC item into a simplified dictionary."""
 props = item.properties
-
+
 return {
 "id": item.id,
 "datetime": props.get("datetime"),

@@ -54,17 +54,17 @@ def search_imagery(
 ) -> dict:
 """
 Search for Sentinel-2 L2A imagery in the EOPF STAC catalog.
-
+
 Args:
 bbox: Bounding box [min_lon, min_lat, max_lon, max_lat]
 start_date: Start date in YYYY-MM-DD format
 end_date: End date in YYYY-MM-DD format
 max_cloud_cover: Maximum cloud cover percentage (0-100)
 max_items: Maximum number of items to return
-
+
 Returns:
 Dictionary with search results including items found
-
+
 Example:
 >>> search_imagery(
 ...     bbox=[-74.0, 40.7, -73.9, 40.8],

@@ -77,48 +77,48 @@ def search_imagery(
 max_cloud_cover = DEFAULT_MAX_CLOUD_COVER
 if max_items is None:
 max_items = DEFAULT_MAX_ITEMS
-
+
 # Build datetime string
 datetime_str = None
 if start_date or end_date:
 start = start_date or "2015-01-01"
 end = end_date or datetime.now().strftime("%Y-%m-%d")
 datetime_str = f"{start}/{end}"
-
+
 try:
 client = _get_stac_client()
-
+
 # Build search parameters
 search_params = {
 "collections": [STAC_COLLECTION],
 "max_items": max_items,
 }
-
+
 if bbox:
 search_params["bbox"] = bbox
-
+
 if datetime_str:
 search_params["datetime"] = datetime_str
-
+
 # Execute search
 search = client.search(**search_params)
 items = list(search.items())
-
+
 # Filter by cloud cover (post-filter since API may not support query param)
 filtered_items = [
-item
-for item in items
+item for item in items
 if item.properties.get("eo:cloud_cover", 100) <= max_cloud_cover
 ]
-
+
 # Sort by date (newest first)
 filtered_items.sort(
-key=lambda x: x.properties.get("datetime", ""),
+key=lambda x: x.properties.get("datetime", ""),
+reverse=True
 )
-
+
 # Format results
 formatted_items = [_format_item(item) for item in filtered_items]
-
+
 return {
 "success": True,
 "total_found": len(items),

@@ -130,7 +130,7 @@ def search_imagery(
 "max_cloud_cover": max_cloud_cover,
 },
 }
-
+
 except Exception as e:
 return {
 "success": False,

@@ -142,28 +142,30 @@ def search_imagery(
 def get_item_details(item_id: str) -> dict:
 """
 Get detailed information about a specific STAC item.
-
+
 Args:
 item_id: The STAC item ID (e.g., "S2B_MSIL2A_20251218T110359_...")
-
+
 Returns:
 Dictionary with full item details including all assets
 """
 try:
+client = _get_stac_client()
+collection = client.get_collection(STAC_COLLECTION)
+
 # Get the item
 item_url = f"{STAC_API_URL}/collections/{STAC_COLLECTION}/items/{item_id}"
-
+
 import requests
-
 response = requests.get(item_url)
 response.raise_for_status()
 item_data = response.json()
-
+
 return {
 "success": True,
 "item": item_data,
 }
-
+
 except Exception as e:
 return {
 "success": False,

@@ -179,28 +181,28 @@ def list_recent_imagery(
 ) -> dict:
 """
 List recent Sentinel-2 imagery, optionally for a specific location.
-
+
 This is a convenience function that combines geocoding and search.
-
+
 Args:
 location_name: Optional place name to search around
 days: Number of days to look back (default: 7)
 max_cloud_cover: Maximum cloud cover percentage
 max_items: Maximum items to return
-
+
 Returns:
 Dictionary with recent imagery items
 """
 from .geocoding import get_bbox_from_location
-
+
 # Calculate date range
 end_date = datetime.now()
 start_date = end_date - timedelta(days=days)
-
+
 # Get bbox if location provided
 bbox = None
 location_info = None
-
+
 if location_name:
 bbox_result = get_bbox_from_location(location_name)
 if bbox_result["success"]:

@@ -215,7 +217,7 @@ def list_recent_imagery(
 "success": False,
 "error": f"Could not geocode location: {location_name}",
 }
-
+
 # Search for imagery
 result = search_imagery(
 bbox=bbox,

@@ -224,8 +226,8 @@ def list_recent_imagery(
 max_cloud_cover=max_cloud_cover,
 max_items=max_items,
 )
-
+
 if location_info:
 result["location"] = location_info
-
+
 return result

{geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/METADATA
CHANGED

@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: geomind-ai
-Version: 1.0.0
-Summary: AI agent for geospatial analysis
+Version: 1.0.1
+Summary: AI agent for geospatial analysis with Sentinel-2 satellite imagery
 Author: Harsh Shinde
 License-Expression: MIT
-Project-URL: Homepage, https://
+Project-URL: Homepage, https://github.com/HarshShinde0/GeoMind
 Project-URL: Repository, https://github.com/HarshShinde0/GeoMind
 Project-URL: Documentation, https://github.com/HarshShinde0/GeoMind#readme
 Project-URL: Issues, https://github.com/HarshShinde0/GeoMind/issues

@@ -36,27 +36,20 @@ Requires-Dist: numpy>=1.26.0
 Requires-Dist: python-dotenv>=1.0.0
 Dynamic: license-file
 
-###
+### Install Dependencies
 
 ```bash
 pip install -r requirements.txt
 ```
 
-###
-
-Set your HuggingFace API key in the environment or update `config.py`:
-
-```python
-# In geomind/config.py
-HF_API_KEY = "your_huggingface_api_key"
-```
-
-Get a free API key from [HuggingFace](https://huggingface.co/settings/tokens).
-
-### 3. Run the Agent
+### Run the Agent
 
 ```bash
-
+# Interactive mode
+geomind
+
+# Single query
+geomind --query "Find recent imagery of Paris"
 ```
 
 ## Example Queries

geomind_ai-1.0.1.dist-info/RECORD

@@ -0,0 +1,14 @@
+geomind/__init__.py,sha256=jgrcaJGNFjwFADhRcgEfc7AUX1HkhTbJKqwjjxRUJH0,174
+geomind/agent.py,sha256=bp-nQa2go41vsI1vQKHQ9X6h08UDMBh6XEUVmQV3u5U,15696
+geomind/cli.py,sha256=gBNwUXFXJyWX5z3kIC6YFBoUPo7iQCp-LXnVnQZFU5k,3393
+geomind/config.py,sha256=E97f1yooN2NqUlREPhO3RKFQXwnvLUrTAdxuqjH6RCk,2066
+geomind/tools/__init__.py,sha256=C_7XGAMNkoapEpQ-5wkrMlskb_Nhew-VUjVZaYQo_Ks,747
+geomind/tools/geocoding.py,sha256=1cbEdLug4E4eoqpeLUnZhlgAnGGxEpBl3sIJXbtqFtE,3217
+geomind/tools/processing.py,sha256=t-TSdnaQ_Za9_nqwbqMdqm_10gGDc8_4GpGWxIRyUv8,10615
+geomind/tools/stac_search.py,sha256=dOHdUiK5SAOD-rFjARNzNPEYJBtM3OR4M7z2HjL_5xo,6884
+geomind_ai-1.0.1.dist-info/licenses/LICENSE,sha256=wVJsn_q_iEJVaeb9LpHj1v5Ljg14FkTR-AhRPQqwj_g,1090
+geomind_ai-1.0.1.dist-info/METADATA,sha256=35300Ii3nfZr9Txv_oTQ0L2S1-jOJo56NDA0CheCXio,2311
+geomind_ai-1.0.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+geomind_ai-1.0.1.dist-info/entry_points.txt,sha256=2nPR3faYKl0-1epccvzMJ2xdi-Q1Vt7aOSvA84oIWnw,45
+geomind_ai-1.0.1.dist-info/top_level.txt,sha256=rjKWNSNRhq4R9xJoZGsG-eAaH7BmTVNvfrrbcaJMIIs,8
+geomind_ai-1.0.1.dist-info/RECORD,,

geomind_ai-1.0.0.dist-info/RECORD

@@ -1,14 +0,0 @@
-geomind/__init__.py,sha256=ID2iX-nI4mf0RqErFeJm2buXYowD1EMDW-zk2lxNBA0,162
-geomind/agent.py,sha256=Go4ilgUXYLSWMNxAe_zYT-hB6lMJWkD2LUsPGrR-y5I,15816
-geomind/cli.py,sha256=Xu83Mn3rT4CpeHYCKPts3Kl-Y7XDLXVmjH_7hZd8QaY,3408
-geomind/config.py,sha256=E97f1yooN2NqUlREPhO3RKFQXwnvLUrTAdxuqjH6RCk,2066
-geomind/tools/__init__.py,sha256=C_7XGAMNkoapEpQ-5wkrMlskb_Nhew-VUjVZaYQo_Ks,747
-geomind/tools/geocoding.py,sha256=0tHschi3jSwyWPzkxNtrw6ziZwUxMtiElElsnRVcsYQ,3115
-geomind/tools/processing.py,sha256=3-sydyOfvjkz_-B9cEATAqcSEv4EXVR3bIA42H-eIv0,10275
-geomind/tools/stac_search.py,sha256=KpznD91Z6WlhC3JAjl0i1IQ3AnF-wWoiyvymy8xRKJw,6586
-geomind_ai-1.0.0.dist-info/licenses/LICENSE,sha256=wVJsn_q_iEJVaeb9LpHj1v5Ljg14FkTR-AhRPQqwj_g,1090
-geomind_ai-1.0.0.dist-info/METADATA,sha256=3mHcocB4-vFl2HHZa8ptX8mVGxtoJ_zRcuvFR8jPdvU,2466
-geomind_ai-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-geomind_ai-1.0.0.dist-info/entry_points.txt,sha256=2nPR3faYKl0-1epccvzMJ2xdi-Q1Vt7aOSvA84oIWnw,45
-geomind_ai-1.0.0.dist-info/top_level.txt,sha256=rjKWNSNRhq4R9xJoZGsG-eAaH7BmTVNvfrrbcaJMIIs,8
-geomind_ai-1.0.0.dist-info/RECORD,,

{geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/WHEEL
File without changes

{geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/entry_points.txt
File without changes

{geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/licenses/LICENSE
File without changes

{geomind_ai-1.0.0.dist-info → geomind_ai-1.0.1.dist-info}/top_level.txt
File without changes