geoai-py 0.13.0__py2.py3-none-any.whl → 0.13.2__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- geoai/__init__.py +1 -1
- geoai/agents/geo_agents.py +35 -20
- geoai/agents/map_tools.py +1 -1
- geoai/train.py +118 -8
- geoai/utils.py +15 -1
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/METADATA +1 -1
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/RECORD +11 -11
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/WHEEL +0 -0
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/entry_points.txt +0 -0
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/licenses/LICENSE +0 -0
- {geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/top_level.txt +0 -0
geoai/__init__.py
CHANGED
geoai/agents/geo_agents.py
CHANGED
@@ -190,6 +190,8 @@ class GeoAgent(Agent):
         *,
         model: str = "llama3.1",
         map_instance: Optional[leafmap.Map] = None,
+        system_prompt: str = "default",
+        model_args: dict = None,
         **kwargs: Any,
     ) -> None:
         """Initialize the GeoAgent.
@@ -197,21 +199,25 @@ class GeoAgent(Agent):
         Args:
             model: Model identifier (default: "llama3.1").
             map_instance: Optional existing map instance.
+            model_args: Additional keyword arguments for the model.
             **kwargs: Additional keyword arguments for the model.
         """
         self.session: MapSession = MapSession(map_instance)
         self.tools: MapTools = MapTools(self.session)
 
+        if model_args is None:
+            model_args = {}
+
         # --- save a model factory we can call each turn ---
         if model == "llama3.1":
             self._model_factory: Callable[[], OllamaModel] = (
                 lambda: create_ollama_model(
-                    host="http://localhost:11434", model_id=model, **
+                    host="http://localhost:11434", model_id=model, **model_args
                 )
             )
         elif isinstance(model, str):
             self._model_factory: Callable[[], BedrockModel] = (
-                lambda: create_bedrock_model(model_id=model, **
+                lambda: create_bedrock_model(model_id=model, **model_args)
             )
         elif isinstance(model, OllamaModel):
             # Extract configuration from existing OllamaModel and create new instances
@@ -220,7 +226,7 @@ class GeoAgent(Agent):
             client_args = model.client_args
             self._model_factory: Callable[[], OllamaModel] = (
                 lambda: create_ollama_model(
-                    host=host, model_id=model_id, client_args=client_args, **
+                    host=host, model_id=model_id, client_args=client_args, **model_args
                 )
             )
         elif isinstance(model, OpenAIModel):
@@ -229,7 +235,7 @@ class GeoAgent(Agent):
             client_args = model.client_args.copy()
             self._model_factory: Callable[[], OpenAIModel] = (
                 lambda mid=model_id, client_args=client_args: create_openai_model(
-                    model_id=mid, client_args=client_args, **
+                    model_id=mid, client_args=client_args, **model_args
                 )
             )
         elif isinstance(model, AnthropicModel):
@@ -238,7 +244,7 @@ class GeoAgent(Agent):
             client_args = model.client_args.copy()
             self._model_factory: Callable[[], AnthropicModel] = (
                 lambda mid=model_id, client_args=client_args: create_anthropic_model(
-                    model_id=mid, client_args=client_args, **
+                    model_id=mid, client_args=client_args, **model_args
                 )
             )
         else:
@@ -247,6 +253,28 @@ class GeoAgent(Agent):
         # build initial model (first turn)
         model = self._model_factory()
 
+        if system_prompt == "default":
+            system_prompt = """
+            You are a map control agent. Call tools with MINIMAL parameters only.
+
+            CRITICAL: Treat all kwargs parameters as optional parameters.
+            CRITICAL: NEVER include optional parameters unless user explicitly asks for them.
+
+            TOOL CALL RULES:
+            - zoom_to(zoom=N) - ONLY zoom parameter, OMIT options completely
+            - add_cog_layer(url='X') - NEVER include bands, nodata, opacity, etc.
+            - fly_to(longitude=N, latitude=N) - NEVER include zoom parameter
+            - add_basemap(name='X') - NEVER include any other parameters
+            - add_marker(lng_lat=[lon,lat]) - NEVER include popup or options
+
+            - remove_layer(name='X') - call get_layer_names() to get the layer name closest to
+            the name of the layer you want to remove before calling this tool
+
+            - add_overture_3d_buildings(kwargs={}) - kwargs parameter required by tool validation
+            FORBIDDEN: Optional parameters, string representations like '{}' or '[1,2,3]'
+            REQUIRED: Minimal tool calls with only what's absolutely necessary
+            """
+
         super().__init__(
             name="Leafmap Visualization Agent",
             model=model,
@@ -276,20 +304,7 @@ class GeoAgent(Agent):
                 self.tools.add_marker,
                 self.tools.set_pitch,
             ],
-            system_prompt=
-            + "CRITICAL: Treat all kwargs parameters as optional parameters.\n"
-            + "CRITICAL: NEVER include optional parameters unless user explicitly asks for them.\n\n"
-            + "TOOL CALL RULES:\n"
-            + "- zoom_to(zoom=N) - ONLY zoom parameter, OMIT options completely\n"
-            + "- add_cog_layer(url='X') - NEVER include bands, nodata, opacity, etc.\n"
-            + "- fly_to(longitude=N, latitude=N) - NEVER include zoom parameter\n"
-            + "- add_basemap(name='X') - NEVER include any other parameters\n"
-            + "- add_marker(lng_lat=[lon,lat]) - NEVER include popup or options\n\n"
-            + "- remove_layer(name='X') - call get_layer_names() to get the layer name closest to"
-            + "the name of the layer you want to remove before calling this tool\n\n"
-            + "- add_overture_3d_buildings(kwargs={}) - kwargs parameter required by tool validation\n"
-            + "FORBIDDEN: Optional parameters, string representations like '{}' or '[1,2,3]'\n"
-            + "REQUIRED: Minimal tool calls with only what's absolutely necessary",
+            system_prompt=system_prompt,
             callback_handler=None,
         )
 
@@ -389,7 +404,7 @@ class GeoAgent(Agent):
             ),
             (
                 "Add GeoJSON",
-                "Add
+                "Add GeoJSON layer: https://github.com/opengeos/datasets/releases/download/us/us_states.geojson",
             ),
             ("Remove layer", "Remove layer OpenTopoMap"),
             ("Save map", "Save the map as demo.html and return the path"),
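
Taken together, these hunks replace the hard-coded concatenated prompt with a system_prompt argument and route model options through an explicit model_args dict instead of **kwargs. A minimal usage sketch follows; the import path, the "temperature" key, and the custom prompt text are illustrative assumptions, not code from the diff.

# Hedged usage sketch (not part of the diff). Assumptions: GeoAgent is importable
# from geoai.agents.geo_agents, and "temperature" is a valid Ollama model option.
from geoai.agents.geo_agents import GeoAgent

agent = GeoAgent(
    model="llama3.1",                     # default local Ollama model
    model_args={"temperature": 0.1},      # forwarded to create_ollama_model(**model_args)
    system_prompt="default",              # "default" selects the built-in minimal-parameters prompt
)

# Passing any other string overrides the built-in prompt entirely:
custom_agent = GeoAgent(model="llama3.1", system_prompt="You are a terse map control agent.")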
geoai/agents/map_tools.py
CHANGED
@@ -115,7 +115,7 @@ class MapTools:
         visible: bool = True,
         bands: Optional[List[int]] = None,
         nodata: Optional[Union[int, float]] = 0,
-        titiler_endpoint: str =
+        titiler_endpoint: str = None,
     ) -> str:
         """Add a Cloud Optimized GeoTIFF (COG) layer to the map.
 
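
The only change here is that titiler_endpoint now defaults to None, so callers of MapTools.add_cog_layer can omit it and let the library pick an endpoint. A hedged sketch, assuming MapSession and MapTools are importable from geoai.agents.map_tools and that a leafmap.Map instance is accepted (as GeoAgent's type hints suggest); the COG URL is a placeholder.

# Hedged sketch (assumed imports and placeholder URL; not from the diff).
import leafmap
from geoai.agents.map_tools import MapSession, MapTools

tools = MapTools(MapSession(leafmap.Map()))
# titiler_endpoint is simply left at its new None default:
tools.add_cog_layer(url="https://example.com/data/cog.tif")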
geoai/train.py
CHANGED
@@ -2625,6 +2625,8 @@ def semantic_inference_on_geotiff(
     num_channels: int = 3,
     num_classes: int = 2,
     device: Optional[torch.device] = None,
+    probability_path: Optional[str] = None,
+    probability_threshold: Optional[float] = None,
     quiet: bool = False,
     **kwargs: Any,
 ) -> Tuple[str, float]:
@@ -2641,6 +2643,11 @@ def semantic_inference_on_geotiff(
         num_channels (int): Number of channels to use from the input image.
         num_classes (int): Number of classes in the model output.
         device (torch.device, optional): Device to run inference on.
+        probability_path (str, optional): Path to save probability map. If provided,
+            the normalized class probabilities will be saved as a multi-band raster.
+        probability_threshold (float, optional): Probability threshold for binary classification.
+            Only used when num_classes=2. If provided, pixels with class 1 probability >= threshold
+            are classified as class 1, otherwise class 0. If None (default), uses argmax.
         quiet (bool): If True, suppress progress bar. Defaults to False.
         **kwargs: Additional arguments.
 
@@ -2811,10 +2818,19 @@ def semantic_inference_on_geotiff(
             / count_accumulator[valid_pixels]
         )
 
-    #
-
-
-
+    # Apply threshold for binary classification or use argmax
+    if probability_threshold is not None and num_classes == 2:
+        # Use threshold: classify as class 1 if probability >= threshold
+        mask[valid_pixels] = (
+            normalized_probs[1, valid_pixels] >= probability_threshold
+        ).astype(np.uint8)
+        if not quiet:
+            print(f"Using probability threshold: {probability_threshold}")
+    else:
+        # Take argmax to get final class predictions
+        mask[valid_pixels] = np.argmax(
+            normalized_probs[:, valid_pixels], axis=0
+        ).astype(np.uint8)
 
     # Check class distribution in predictions (summary only)
     unique_classes, class_counts = np.unique(
@@ -2839,6 +2855,29 @@ def semantic_inference_on_geotiff(
     if not quiet:
         print(f"Saved prediction to {output_path}")
 
+    # Save probability map if requested
+    if probability_path is not None:
+        prob_dir = os.path.abspath(os.path.dirname(probability_path))
+        os.makedirs(prob_dir, exist_ok=True)
+
+        # Prepare probability output metadata
+        prob_meta = meta.copy()
+        prob_meta.update({"count": num_classes, "dtype": "float32"})
+
+        # Save normalized probabilities
+        with rasterio.open(probability_path, "w", **prob_meta) as dst:
+            for class_idx in range(num_classes):
+                # Normalize probabilities
+                prob_band = np.zeros((height, width), dtype=np.float32)
+                prob_band[valid_pixels] = (
+                    prob_accumulator[class_idx, valid_pixels]
+                    / count_accumulator[valid_pixels]
+                )
+                dst.write(prob_band, class_idx + 1)
+
+        if not quiet:
+            print(f"Saved probability map to {probability_path}")
+
     return output_path, inference_time
 
 
@@ -2853,6 +2892,8 @@ def semantic_inference_on_image(
     num_classes: int = 2,
     device: Optional[torch.device] = None,
     binary_output: bool = True,
+    probability_path: Optional[str] = None,
+    probability_threshold: Optional[float] = None,
     quiet: bool = False,
     **kwargs: Any,
 ) -> Tuple[str, float]:
@@ -2870,6 +2911,11 @@ def semantic_inference_on_image(
         num_classes (int): Number of classes in the model output.
         device (torch.device, optional): Device to run inference on.
         binary_output (bool): If True, convert multi-class output to binary (class > 0).
+        probability_path (str, optional): Path to save probability map. If provided,
+            the normalized class probabilities will be saved as a multi-band raster.
+        probability_threshold (float, optional): Probability threshold for binary classification.
+            Only used when num_classes=2. If provided, pixels with class 1 probability >= threshold
+            are classified as class 1, otherwise class 0. If None (default), uses argmax.
         quiet (bool): If True, suppress progress bar. Defaults to False.
         **kwargs: Additional arguments.
 
@@ -3056,10 +3102,19 @@ def semantic_inference_on_image(
         / count_accumulator[valid_pixels]
     )
 
-    #
-
-
-
+    # Apply threshold for binary classification or use argmax
+    if probability_threshold is not None and num_classes == 2:
+        # Use threshold: classify as class 1 if probability >= threshold
+        mask[valid_pixels] = (
+            normalized_probs[1, valid_pixels] >= probability_threshold
+        ).astype(np.uint8)
+        if not quiet:
+            print(f"Using probability threshold: {probability_threshold}")
+    else:
+        # Take argmax to get final class predictions
+        mask[valid_pixels] = np.argmax(
+            normalized_probs[:, valid_pixels], axis=0
+        ).astype(np.uint8)
 
     # Check class distribution in predictions before binary conversion
     unique_classes, class_counts = np.unique(mask, return_counts=True)
@@ -3116,6 +3171,40 @@ def semantic_inference_on_image(
     if not quiet:
         print(f"Saved prediction to {output_path}")
 
+    # Save probability map if requested
+    if probability_path is not None:
+        prob_dir = os.path.abspath(os.path.dirname(probability_path))
+        os.makedirs(prob_dir, exist_ok=True)
+
+        # For regular images, we'll save as a multi-channel TIFF
+        # since we need to preserve floating point values
+        import rasterio
+        from rasterio.transform import from_bounds
+
+        # Create a simple affine transform (identity transform for pixel coordinates)
+        transform = from_bounds(0, 0, width, height, width, height)
+
+        # Prepare probability output metadata
+        prob_meta = {
+            "driver": "GTiff",
+            "height": height,
+            "width": width,
+            "count": num_classes,
+            "dtype": "float32",
+            "transform": transform,
+        }
+
+        # Save normalized probabilities
+        with rasterio.open(probability_path, "w", **prob_meta) as dst:
+            for class_idx in range(num_classes):
+                # Normalize probabilities
+                prob_band = np.zeros((height, width), dtype=np.float32)
+                prob_band[valid_pixels] = normalized_probs[class_idx, valid_pixels]
+                dst.write(prob_band, class_idx + 1)
+
+        if not quiet:
+            print(f"Saved probability map to {probability_path}")
+
     return output_path, inference_time
 
 
@@ -3131,6 +3220,8 @@ def semantic_segmentation(
     overlap: int = 256,
     batch_size: int = 4,
     device: Optional[torch.device] = None,
+    probability_path: Optional[str] = None,
+    probability_threshold: Optional[float] = None,
     quiet: bool = False,
     **kwargs: Any,
 ) -> None:
@@ -3152,6 +3243,12 @@ def semantic_segmentation(
        overlap (int): Overlap between adjacent windows.
        batch_size (int): Batch size for inference.
        device (torch.device, optional): Device to run inference on.
+        probability_path (str, optional): Path to save probability map. If provided,
+            the normalized class probabilities will be saved as a multi-band raster.
+        probability_threshold (float, optional): Probability threshold for binary classification.
+            Only used when num_classes=2. If provided, pixels with class 1 probability >= threshold
+            are classified as class 1, otherwise class 0. If None (default), uses argmax.
+            Must be between 0 and 1.
         quiet (bool): If True, suppress progress bar. Defaults to False.
         **kwargs: Additional arguments.
 
@@ -3205,6 +3302,15 @@ def semantic_segmentation(
     model.to(device)
     model.eval()
 
+    # Validate probability_threshold
+    if probability_threshold is not None:
+        if not (0 <= probability_threshold <= 1):
+            raise ValueError("probability_threshold must be between 0 and 1")
+        if num_classes != 2:
+            raise ValueError(
+                "probability_threshold is only supported for binary classification (num_classes=2)"
+            )
+
     # Use appropriate inference function based on file format
     if is_geotiff:
         semantic_inference_on_geotiff(
@@ -3217,6 +3323,8 @@ def semantic_segmentation(
             num_channels=num_channels,
             num_classes=num_classes,
             device=device,
+            probability_path=probability_path,
+            probability_threshold=probability_threshold,
             quiet=quiet,
             **kwargs,
         )
@@ -3235,6 +3343,8 @@ def semantic_segmentation(
             num_classes=num_classes,
            device=device,
            binary_output=True,  # Convert to binary output for better visualization
+            probability_path=probability_path,
+            probability_threshold=probability_threshold,
            quiet=quiet,
             **kwargs,
         )
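
These train.py changes thread two new options through the public semantic_segmentation entry point: probability_path writes the accumulated class probabilities to a float32 multi-band raster, and probability_threshold (binary case only, validated to lie in [0, 1] with num_classes=2) replaces argmax with a fixed cutoff on the class 1 probability. A minimal sketch follows; only the parameters shown in the hunks above come from the diff, while the input_path, output_path, and model_path names and values are assumptions.

# Hedged sketch (assumed I/O argument names and placeholder paths; not from the diff).
from geoai.train import semantic_segmentation

semantic_segmentation(
    input_path="naip_test.tif",               # assumed parameter name, placeholder raster
    output_path="buildings_mask.tif",         # assumed parameter name, placeholder output
    model_path="best_model.pth",              # assumed parameter name, placeholder checkpoint
    num_classes=2,                            # required for probability_threshold to be accepted
    probability_path="buildings_prob.tif",    # new: per-class probabilities as float32 bands
    probability_threshold=0.6,                # new: class 1 where P(class 1) >= 0.6, else class 0
    overlap=256,
    batch_size=4,
)

Per the validation added above, a threshold outside [0, 1], or any threshold combined with num_classes other than 2, raises ValueError before inference starts.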
geoai/utils.py
CHANGED
@@ -7522,7 +7522,11 @@ def write_colormap(
 
 
 def plot_performance_metrics(
-    history_path: str,
+    history_path: str,
+    figsize: Tuple[int, int] = (15, 5),
+    verbose: bool = True,
+    save_path: Optional[str] = None,
+    kwargs: Optional[Dict] = None,
 ) -> None:
     """Plot performance metrics from a history object.
 
@@ -7531,6 +7535,8 @@ def plot_performance_metrics(
         figsize: The figure size.
         verbose: Whether to print the best and final metrics.
     """
+    if kwargs is None:
+        kwargs = {}
     history = torch.load(history_path)
 
     # Handle different key naming conventions
@@ -7579,6 +7585,14 @@ def plot_performance_metrics(
     plt.grid(True)
 
     plt.tight_layout()
+
+    if save_path:
+        if "dpi" not in kwargs:
+            kwargs["dpi"] = 150
+        if "bbox_inches" not in kwargs:
+            kwargs["bbox_inches"] = "tight"
+        plt.savefig(save_path, **kwargs)
+
     plt.show()
 
     if verbose:
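
plot_performance_metrics gains save_path and a kwargs dict that is forwarded to plt.savefig, with dpi=150 and bbox_inches="tight" filled in when absent. A minimal sketch, using a placeholder history file name:

# Minimal sketch; "training_history.pth" is a placeholder for a torch-saved history file.
from geoai.utils import plot_performance_metrics

plot_performance_metrics(
    "training_history.pth",
    figsize=(15, 5),
    save_path="metrics.png",     # new: figure is written via plt.savefig before plt.show()
    kwargs={"dpi": 300},         # new: overrides the default dpi=150 passed to plt.savefig
)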
{geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-geoai/__init__.py,sha256=
+geoai/__init__.py,sha256=4gwrJIsRBRP9mcJZMjX3TXxCtV_61kmf7ewRRkcDjiM,3851
 geoai/change_detection.py,sha256=XkJjMEU1nD8uX3-nQy7NEmz8cukVeSaRxKJHlrv8xPM,59636
 geoai/classify.py,sha256=0DcComVR6vKU4qWtH2oHVeXc7ZTcV0mFvdXRtlNmolo,35637
 geoai/detectron2.py,sha256=dOOFM9M9-6PV8q2A4-mnIPrz7yTo-MpEvDiAW34nl0w,14610
@@ -11,14 +11,14 @@ geoai/map_widgets.py,sha256=QLmkILsztNaRXRULHKOd7Glb7S0pEWXSK9-P8S5AuzQ,5856
 geoai/sam.py,sha256=O6S-kGiFn7YEcFbfWFItZZQOhnsm6-GlunxQLY0daEs,34345
 geoai/segment.py,sha256=yBGTxA-ti8lBpk7WVaBOp6yP23HkaulKJQk88acrmZ0,43788
 geoai/segmentation.py,sha256=7yEzBSKCyHW1dNssoK0rdvhxi2IXsIQIFSga817KdI4,11535
-geoai/train.py,sha256=
-geoai/utils.py,sha256=
+geoai/train.py,sha256=QDG0z6VmvHmzJC-oujZb2YSxnNEBloU_lmEgSygPM6U,141818
+geoai/utils.py,sha256=lpyhytBeDLiqWz31syeRvpbT5AUn3cOblKU57uDD9sU,301265
 geoai/agents/__init__.py,sha256=NndUtQ5-i8Zuim8CJftCZYKbCvrkDXj9iLVtiBtc_qE,178
-geoai/agents/geo_agents.py,sha256=
-geoai/agents/map_tools.py,sha256=
-geoai_py-0.13.
-geoai_py-0.13.
-geoai_py-0.13.
-geoai_py-0.13.
-geoai_py-0.13.
-geoai_py-0.13.
+geoai/agents/geo_agents.py,sha256=4tLntKBL_FgTQsUVzReP9acbYotnfjMRc5BYwW9WEyE,21431
+geoai/agents/map_tools.py,sha256=OK5uB0VUHjjUnc-DYRy2CQ__kyUIARSCPBucGabO0Xw,60669
+geoai_py-0.13.2.dist-info/licenses/LICENSE,sha256=vN2L5U7cZ6ZkOHFmc8WiGlsogWsZc5dllMeNxnKVOZg,1070
+geoai_py-0.13.2.dist-info/METADATA,sha256=wGO4OHdq1ubtroaqbEciKq1jHmJD8C_krbZTJByFP98,10345
+geoai_py-0.13.2.dist-info/WHEEL,sha256=JNWh1Fm1UdwIQV075glCn4MVuCRs0sotJIq-J6rbxCU,109
+geoai_py-0.13.2.dist-info/entry_points.txt,sha256=uGp3Az3HURIsRHP9v-ys0hIbUuBBNUfXv6VbYHIXeg4,41
+geoai_py-0.13.2.dist-info/top_level.txt,sha256=1YkCUWu-ii-0qIex7kbwAvfei-gos9ycyDyUCJPNWHY,6
+geoai_py-0.13.2.dist-info/RECORD,,
{geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/WHEEL
File without changes
{geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/entry_points.txt
File without changes
{geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/licenses/LICENSE
File without changes
{geoai_py-0.13.0.dist-info → geoai_py-0.13.2.dist-info}/top_level.txt
File without changes