giga-spatial 0.6.3-py3-none-any.whl → 0.6.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {giga_spatial-0.6.3.dist-info → giga_spatial-0.6.5.dist-info}/METADATA +2 -1
- giga_spatial-0.6.5.dist-info/RECORD +50 -0
- gigaspatial/__init__.py +1 -1
- gigaspatial/config.py +35 -4
- gigaspatial/core/io/__init__.py +1 -0
- gigaspatial/core/io/database.py +316 -0
- gigaspatial/generators/__init__.py +5 -1
- gigaspatial/generators/poi.py +228 -43
- gigaspatial/generators/zonal/__init__.py +2 -1
- gigaspatial/generators/zonal/admin.py +84 -0
- gigaspatial/generators/zonal/base.py +221 -64
- gigaspatial/generators/zonal/geometry.py +74 -31
- gigaspatial/generators/zonal/mercator.py +50 -19
- gigaspatial/grid/__init__.py +1 -1
- gigaspatial/grid/mercator_tiles.py +33 -10
- gigaspatial/handlers/__init__.py +5 -1
- gigaspatial/handlers/boundaries.py +226 -48
- gigaspatial/handlers/ghsl.py +79 -14
- gigaspatial/handlers/giga.py +641 -0
- gigaspatial/handlers/hdx.py +50 -51
- gigaspatial/handlers/maxar_image.py +1 -2
- gigaspatial/handlers/rwi.py +5 -2
- gigaspatial/processing/algorithms.py +188 -0
- gigaspatial/processing/geo.py +87 -25
- gigaspatial/processing/tif_processor.py +220 -45
- giga_spatial-0.6.3.dist-info/RECORD +0 -47
- {giga_spatial-0.6.3.dist-info → giga_spatial-0.6.5.dist-info}/WHEEL +0 -0
- {giga_spatial-0.6.3.dist-info → giga_spatial-0.6.5.dist-info}/licenses/LICENSE +0 -0
- {giga_spatial-0.6.3.dist-info → giga_spatial-0.6.5.dist-info}/top_level.txt +0 -0
gigaspatial/handlers/hdx.py
CHANGED
```diff
@@ -1,13 +1,9 @@
-import os
 import logging
 from tqdm import tqdm
 from pathlib import Path
-from typing import List, Optional,
+from typing import List, Optional, Union, Dict, Any, Iterable
 import tempfile
-import functools
-import multiprocessing
 
-import pandas as pd
 import geopandas as gpd
 from pydantic import Field, ConfigDict
 from pydantic.dataclasses import dataclass
```
```diff
@@ -50,6 +46,48 @@ class HDXConfig(BaseHandlerConfig):
     _hdx_configured: bool = Field(default=False, init=False)
     dataset: Optional[Dataset] = Field(default=None, init=False)
 
+    @staticmethod
+    def search_datasets(
+        query: str,
+        rows: int = None,
+        sort: str = "relevance asc, metadata_modified desc",
+        hdx_site: str = "prod",
+        user_agent: str = "gigaspatial",
+    ) -> List[Dict]:
+        """Search for datasets in HDX before initializing the class.
+
+        Args:
+            query: Search query string
+            rows: Number of results per page. Defaults to all datasets (sys.maxsize).
+            sort: Sort order - one of 'relevance', 'views_recent', 'views_total', 'last_modified' (default: 'relevance')
+            hdx_site: HDX site to use - 'prod' or 'test' (default: 'prod')
+            user_agent: User agent for HDX API requests (default: 'gigaspatial')
+
+        Returns:
+            List of dataset dictionaries containing search results
+
+        Example:
+            >>> results = HDXConfig.search_datasets("population", rows=5)
+            >>> for dataset in results:
+            >>>     print(f"Name: {dataset['name']}, Title: {dataset['title']}")
+        """
+        try:
+            Configuration.create(
+                hdx_site=hdx_site,
+                user_agent=user_agent,
+                hdx_read_only=True,
+            )
+        except:
+            pass
+
+        try:
+            results = Dataset.search_in_hdx(query=query, rows=rows, sort=sort)
+
+            return results
+        except Exception as e:
+            logging.error(f"Error searching HDX datasets: {str(e)}")
+            raise
+
     def __post_init__(self):
         super().__post_init__()
         try:
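```

The new `search_datasets` staticmethod can be called before any config or handler is constructed. A minimal usage sketch, following the docstring's own example (the query string is illustrative):

```python
from gigaspatial.handlers.hdx import HDXConfig

# Discover candidate datasets first; the 'name' and 'title' keys follow
# the docstring example above.
results = HDXConfig.search_datasets("population", rows=5)
for dataset in results:
    print(f"Name: {dataset['name']}, Title: {dataset['title']}")
```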
```diff
@@ -85,7 +123,11 @@ class HDXConfig(BaseHandlerConfig):
             self.logger.info(f"Fetching HDX dataset: {self.dataset_name}")
             dataset = Dataset.read_from_hdx(self.dataset_name)
             if not dataset:
-                raise ValueError(
+                raise ValueError(
+                    f"Dataset '{self.dataset_name}' not found on HDX. "
+                    "Please verify the dataset name or use search_datasets() "
+                    "to find available datasets."
+                )
             return dataset
         except Exception as e:
             self.logger.error(f"Error fetching HDX dataset: {str(e)}")
```
```diff
@@ -386,9 +428,9 @@ class HDXReader(BaseHandlerReader):
         self, source_data_path: List[Union[str, Path]], **kwargs
     ) -> Any:
         """Load data from paths"""
-        if len(source_data_path)==1:
+        if len(source_data_path) == 1:
             return read_dataset(self.data_store, source_data_path[0])
-
+
         all_data = {}
         for file_path in source_data_path:
             try:
```
```diff
@@ -401,49 +443,6 @@ class HDXReader(BaseHandlerReader):
         resources = self.config.list_resources()
         return self.load_from_paths(resources)
 
-    # def read_resource(
-    #     self, resource_file: str
-    # ) -> Union[pd.DataFrame, gpd.GeoDataFrame]:
-    #     """Read a specific resource file from the dataset using the data_store."""
-    #     if not self.dataset_path:
-    #         raise ValueError("No dataset path configured")
-
-    #     file_path = str(self.dataset_path / resource_file)
-
-    #     if not self.data_store.file_exists(file_path):
-    #         raise FileNotFoundError(
-    #             f"Resource file {resource_file} not found in dataset"
-    #         )
-
-    #     try:
-    #         return read_dataset(self.data_store, file_path)
-    #     except Exception as e:
-    #         raise ValueError(f"Could not read file {file_path}: {str(e)}")
-
-    # def read_all_resources(self) -> Dict[str, Union[pd.DataFrame, gpd.GeoDataFrame]]:
-    #     """Read all resources in the dataset directory using the data_store."""
-    #     resources = self.list_resources()
-    #     result = {}
-
-    #     for resource in resources:
-    #         try:
-    #             result[resource] = self.read_resource(resource)
-    #         except Exception as e:
-    #             self.logger.warning(f"Could not read resource {resource}: {str(e)}")
-
-    #     return result
-
-    # def load_from_paths(
-    #     self, source_data_path: List[Union[str, Path]], **kwargs
-    # ) -> Union[
-    #     pd.DataFrame, gpd.GeoDataFrame, Dict[str, Union[pd.DataFrame, gpd.GeoDataFrame]]
-    # ]:
-    #     """Load data from paths"""
-    #     if len(source_data_path) == 1:
-    #         return self.read_resource(str(source_data_path[0]))
-    #     else:
-    #         return self.read_all_resources()
-
 
 class HDXHandler(BaseHandler):
     """Handler for HDX datasets"""
```
gigaspatial/handlers/maxar_image.py
CHANGED

```diff
@@ -14,7 +14,6 @@ from gigaspatial.processing.geo import (
     convert_to_geodataframe,
     buffer_geodataframe,
 )
-from gigaspatial.processing.sat_images import calculate_pixels_at_location
 from gigaspatial.config import config as global_config
 
 
@@ -142,7 +141,7 @@ class MaxarImageDownloader:
                 self.logger.warning(
                     f"Attempt {attempt + 1} of downloading {output_path.name} failed: {str(e)}"
                 )
-                if attempt < self.max_retries - 1:
+                if attempt < self.config.max_retries - 1:
                     sleep(self.config.retry_delay)
                 else:
                     self.logger.warning(
```
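The second hunk fixes a likely AttributeError: `max_retries` lives on the downloader's config object (alongside `retry_delay`), not on the instance. A minimal sketch of that retry pattern, with hypothetical names for everything except the two config fields taken from the diff:

```python
from dataclasses import dataclass
from time import sleep
from urllib.request import urlopen


@dataclass
class DownloadConfig:
    # Field names taken from the diff; defaults are illustrative.
    max_retries: int = 3
    retry_delay: float = 5.0


def download_with_retries(url: str, config: DownloadConfig) -> bytes:
    """Retry a flaky download, sleeping between attempts (a sketch, not the real downloader)."""
    last_error = None
    for attempt in range(config.max_retries):
        try:
            with urlopen(url) as response:
                return response.read()
        except Exception as e:
            last_error = e
            # Same guard as the fixed line: only sleep if another attempt remains.
            if attempt < config.max_retries - 1:
                sleep(config.retry_delay)
    raise RuntimeError(f"All {config.max_retries} attempts failed for {url}") from last_error
```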
gigaspatial/handlers/rwi.py
CHANGED
```diff
@@ -2,6 +2,7 @@ import logging
 from typing import List, Optional, Union, Literal
 from pydantic.dataclasses import dataclass
 from datetime import datetime
+import pycountry
 
 from hdx.data.resource import Resource
 
```
```diff
@@ -36,8 +37,10 @@ class RWIConfig(HDXConfig):
         self, country: str, **kwargs
     ) -> List[Resource]:
         """Get relevant data units for a country, optionally filtering for latest version"""
-
-
+        country = pycountry.countries.lookup(country)
+        values = [country.alpha_3]
+        resources = self.get_dataset_resources(
+            filter={"url": values},
         )
 
         if self.latest_only and len(resources) > 1:
```
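The new `pycountry` lookup makes country matching tolerant of full names as well as ISO codes; the resulting alpha-3 code is then used to filter resource URLs. A quick illustration of the lookup itself:

```python
import pycountry

# pycountry.countries.lookup accepts names, alpha-2, or alpha-3 codes.
for query in ("Kenya", "KE", "KEN"):
    country = pycountry.countries.lookup(query)
    print(query, "->", country.alpha_3)  # all three print KEN
```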
gigaspatial/processing/algorithms.py
ADDED

```diff
@@ -0,0 +1,188 @@
+import sys, os
+
+import numpy as np
+from typing import Literal, List, Tuple, Union, Optional
+import geopandas as gpd
+import pandas as pd
+from scipy.spatial import cKDTree
+import networkx as nx
+
+from gigaspatial.processing.geo import (
+    convert_to_geodataframe,
+)
+from gigaspatial.config import config
+
+LOGGER = config.get_logger("GigaSpatialProcessing")
+
+
+def build_distance_graph(
+    left_df: Union[pd.DataFrame, gpd.GeoDataFrame],
+    right_df: Union[pd.DataFrame, gpd.GeoDataFrame],
+    distance_threshold: float,
+    max_k: int = 100,
+    return_dataframe: bool = False,
+    verbose: bool = True,
+    exclude_same_index: Optional[bool] = None,
+) -> Union[nx.Graph, Tuple[nx.Graph, pd.DataFrame]]:
+    """
+    Build a graph of spatial matches between two dataframes using KD-tree.
+
+    Args:
+        left_df: Left dataframe to match from
+        right_df: Right dataframe to match to
+        distance_threshold: Maximum distance for matching (in meters)
+        max_k: Maximum number of neighbors to consider per point (default: 100)
+        return_dataframe: If True, also return the matches DataFrame
+        verbose: If True, print statistics about the graph
+        exclude_same_index: If True, exclude self-matches. If None, auto-detect based on df equality
+
+    Returns:
+        NetworkX Graph, or tuple of (Graph, DataFrame) if return_dataframe=True
+
+    Raises:
+        ValueError: If distance_threshold is negative or max_k is not positive
+    """
+
+    # Input validation
+    if distance_threshold < 0:
+        raise ValueError("distance_threshold must be non-negative")
+
+    if max_k <= 0:
+        raise ValueError("max_k must be positive")
+
+    if left_df.empty or right_df.empty:
+        if verbose:
+            LOGGER.warning("Warning: One or both dataframes are empty")
+        G = nx.Graph()
+        return (G, pd.DataFrame()) if return_dataframe else G
+
+    def get_utm_coordinates(df: Union[pd.DataFrame, gpd.GeoDataFrame]) -> np.ndarray:
+        """Extract coordinates as numpy array in UTM projection."""
+        if isinstance(df, pd.DataFrame):
+            gdf = convert_to_geodataframe(df)
+        else:
+            gdf = df.copy()
+
+        # More robust UTM CRS estimation
+        try:
+            gdf_utm = gdf.to_crs(gdf.estimate_utm_crs())
+        except Exception as e:
+            if verbose:
+                LOGGER.warning(
+                    f"Warning: UTM CRS estimation failed, using Web Mercator. Error: {e}"
+                )
+            gdf_utm = gdf.to_crs("EPSG:3857")  # Fallback to Web Mercator
+
+        return gdf_utm.get_coordinates().to_numpy()
+
+    # Auto-detect same dataframe case
+    if exclude_same_index is None:
+        exclude_same_index = left_df.equals(right_df)
+        if verbose and exclude_same_index:
+            LOGGER.info("Auto-detected same dataframe - excluding self-matches")
+
+    # Get coordinates
+    left_coords = get_utm_coordinates(left_df)
+    right_coords = (
+        get_utm_coordinates(right_df) if not exclude_same_index else left_coords
+    )
+
+    # Build KD-tree and query
+    kdtree = cKDTree(right_coords)
+
+    # Use the provided max_k parameter, but don't exceed available points
+    k_to_use = min(max_k, len(right_coords))
+
+    if verbose and k_to_use < max_k:
+        LOGGER.info(
+            f"Note: max_k ({max_k}) reduced to {k_to_use} (number of available points)"
+        )
+
+    # Note: Distance calculations here are based on Euclidean distance in UTM projection.
+    # This can introduce errors up to ~50 cm for a 50 meter threshold, especially near the poles where distortion increases.
+    distances, indices = kdtree.query(
+        left_coords, k=k_to_use, distance_upper_bound=distance_threshold
+    )
+
+    # Handle single k case (when k_to_use = 1, results are 1D)
+    if distances.ndim == 1:
+        distances = distances.reshape(-1, 1)
+        indices = indices.reshape(-1, 1)
+
+    # Extract valid pairs using vectorized operations
+    left_indices = np.arange(len(distances))[:, np.newaxis]
+    left_indices = np.broadcast_to(left_indices, distances.shape)
+    valid_mask = np.isfinite(distances)
+
+    if exclude_same_index:
+        same_index_mask = left_indices == indices
+        valid_mask = valid_mask & ~same_index_mask
+
+    valid_left = left_indices[valid_mask]
+    valid_right = indices[valid_mask]
+    valid_distances = distances[valid_mask]
+
+    # Map back to original indices
+    valid_left_indices = left_df.index.values[valid_left]
+    valid_right_indices = right_df.index.values[valid_right]
+
+    # Create matches DataFrame
+    matches_df = pd.DataFrame(
+        {
+            "left_idx": valid_left_indices,
+            "right_idx": valid_right_indices,
+            "distance": valid_distances,
+        }
+    )
+
+    # Build graph more efficiently
+    G = nx.from_pandas_edgelist(
+        matches_df,
+        source="left_idx",
+        target="right_idx",
+        edge_attr="distance",
+        create_using=nx.Graph(),
+    )
+
+    # Add isolated nodes (nodes without any matches within threshold)
+    # This ensures all original indices are represented in the graph
+    all_left_nodes = set(left_df.index.values)
+    all_right_nodes = set(right_df.index.values)
+
+    if not exclude_same_index:
+        all_nodes = all_left_nodes | all_right_nodes
+    else:
+        all_nodes = all_left_nodes  # Same dataframe, so same node set
+
+    # Add nodes that don't have edges
+    existing_nodes = set(G.nodes())
+    isolated_nodes = all_nodes - existing_nodes
+    G.add_nodes_from(isolated_nodes)
+
+    # Print statistics
+    if verbose:
+        print(
+            f"Total potential matches: {len(left_df)} × {len(right_df)} = {len(left_df) * len(right_df):,}"
+        )
+        print(f"Matches found within {distance_threshold}m: {len(matches_df):,}")
+        print(f"Graph nodes: {G.number_of_nodes():,}")
+        print(f"Graph edges: {G.number_of_edges():,}")
+
+        components = list(nx.connected_components(G))
+        print(f"Connected components: {len(components):,}")
+
+        if len(components) > 1:
+            component_sizes = [len(c) for c in components]
+            print(f"Largest component size: {max(component_sizes):,}")
+            print(
+                f"Isolated nodes: {sum(1 for size in component_sizes if size == 1):,}"
+            )
+
+        if len(matches_df) > 0:
+            print(
+                f"Distance stats - min: {matches_df['distance'].min():.1f}m, "
+                f"max: {matches_df['distance'].max():.1f}m, "
+                f"mean: {matches_df['distance'].mean():.1f}m"
+            )
+
+    return (G, matches_df) if return_dataframe else G
```
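A sketch of how the new helper might be used, here to group near-duplicate points by clustering everything within 100 m (toy coordinates and column names are invented for illustration):

```python
import geopandas as gpd
import networkx as nx
from shapely.geometry import Point

from gigaspatial.processing.algorithms import build_distance_graph

# Toy points in lon/lat; the first two are roughly 10 m apart.
points = gpd.GeoDataFrame(
    {"name": ["a", "b", "c"]},
    geometry=[
        Point(36.8219, -1.2921),
        Point(36.8220, -1.2921),
        Point(36.9000, -1.3000),
    ],
    crs="EPSG:4326",
)

# Matching a dataset against itself: self-matches are auto-excluded.
G, matches = build_distance_graph(
    points, points, distance_threshold=100, return_dataframe=True
)

# Each connected component groups points within the threshold - a simple dedup key.
for component in nx.connected_components(G):
    print(sorted(component))
```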
gigaspatial/processing/geo.py
CHANGED
```diff
@@ -272,8 +272,13 @@ def buffer_geodataframe(
     input_crs = gdf_work.crs
 
     try:
-
-
+        try:
+            utm_crs = gdf_work.estimate_utm_crs()
+        except Exception as e:
+            LOGGER.warning(
+                f"Warning: UTM CRS estimation failed, using Web Mercator. Error: {e}"
+            )
+            utm_crs = "EPSG:3857"  # Fallback to Web Mercator
 
         # Transform to UTM, create buffer, and transform back
         gdf_work = gdf_work.to_crs(utm_crs)
```
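The same estimate-then-fall-back pattern now appears in several functions of this module. In isolation it looks like this (a minimal sketch in plain GeoPandas):

```python
import geopandas as gpd
from shapely.geometry import Point

gdf = gpd.GeoDataFrame(geometry=[Point(36.82, -1.29)], crs="EPSG:4326")

# estimate_utm_crs picks the UTM zone covering the data; it can fail for
# empty or degenerate geometries, hence the Web Mercator fallback.
try:
    utm_crs = gdf.estimate_utm_crs()
except Exception:
    utm_crs = "EPSG:3857"  # Fallback to Web Mercator

# Buffer in metres in the projected CRS, then return to the original CRS.
buffered = gdf.to_crs(utm_crs).buffer(500).to_crs(gdf.crs)
print(utm_crs)  # e.g. EPSG:32737 for this point near Nairobi
```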
```diff
@@ -452,7 +457,13 @@ def add_area_in_meters(
     gdf_with_area = gdf.copy()
 
     # Calculate the UTM CRS for accurate area calculation
-
+    try:
+        utm_crs = gdf_with_area.estimate_utm_crs()
+    except Exception as e:
+        LOGGER.warning(
+            f"Warning: UTM CRS estimation failed, using Web Mercator. Error: {e}"
+        )
+        utm_crs = "EPSG:3857"  # Fallback to Web Mercator
 
     # Transform to UTM CRS and calculate the area in square meters
     gdf_with_area[area_column_name] = gdf_with_area.to_crs(utm_crs).geometry.area
```
```diff
@@ -858,39 +869,79 @@ def aggregate_polygons_to_zones(
     zones: gpd.GeoDataFrame,
     value_columns: Union[str, List[str]],
     aggregation: Union[str, Dict[str, str]] = "sum",
-
+    predicate: Literal["intersects", "within", "fractional"] = "intersects",
     zone_id_column: str = "zone_id",
     output_suffix: str = "",
     drop_geometry: bool = False,
 ) -> gpd.GeoDataFrame:
     """
-
+    Aggregates polygon data to zones based on a specified spatial relationship.
 
-    This function
-
+    This function performs a spatial join between polygons and zones and then
+    aggregates values from the polygons to their corresponding zones. The aggregation
+    method depends on the `predicate` parameter, which determines the nature of the
+    spatial relationship.
 
     Args:
-        polygons (Union[pd.DataFrame, gpd.GeoDataFrame]):
-
-
-
-
-
-
-
-
-
-
+        polygons (Union[pd.DataFrame, gpd.GeoDataFrame]):
+            Polygon data to aggregate. Must be a GeoDataFrame or convertible to one.
+        zones (gpd.GeoDataFrame):
+            The target zones to which the polygon data will be aggregated.
+        value_columns (Union[str, List[str]]):
+            The column(s) in `polygons` containing the numeric values to aggregate.
+        aggregation (Union[str, Dict[str, str]], optional):
+            The aggregation method(s) to use. Can be a single string (e.g., "sum",
+            "mean", "max") to apply the same method to all columns, or a dictionary
+            mapping column names to aggregation methods (e.g., `{'population': 'sum'}`).
+            Defaults to "sum".
+        predicate (Literal["intersects", "within", "fractional"], optional):
+            The spatial relationship to use for aggregation:
+            - "intersects": Aggregates values for any polygon that intersects a zone.
+            - "within": Aggregates values for polygons entirely contained within a zone.
+            - "fractional": Performs area-weighted aggregation. The value of a polygon
+              is distributed proportionally to the area of its overlap with each zone.
+              This requires calculating a UTM CRS for accurate area measurements.
+            Defaults to "intersects".
+        zone_id_column (str, optional):
+            The name of the column in `zones` that contains the unique zone identifiers.
+            Defaults to "zone_id".
+        output_suffix (str, optional):
+            A suffix to add to the names of the new aggregated columns in the output
+            GeoDataFrame. Defaults to "".
+        drop_geometry (bool, optional):
+            If True, the geometry column will be dropped from the output GeoDataFrame.
+            Defaults to False.
 
     Returns:
-        gpd.GeoDataFrame:
+        gpd.GeoDataFrame:
+            The `zones` GeoDataFrame with new columns containing the aggregated values.
+            Zones with no intersecting or contained polygons will have `0` values.
+
+    Raises:
+        TypeError: If `zones` is not a GeoDataFrame or `polygons` cannot be converted.
+        ValueError: If `zone_id_column` or any `value_columns` are not found, or
+            if the geometry types in `polygons` are not polygons.
+        RuntimeError: If an error occurs during the area-weighted aggregation process.
 
     Example:
-        >>>
+        >>> import geopandas as gpd
+        >>> # Assuming 'landuse_polygons' and 'grid_zones' are GeoDataFrames
+        >>> # Aggregate total population within each grid zone using area-weighting
+        >>> pop_by_zone = aggregate_polygons_to_zones(
+        ...     landuse_polygons,
+        ...     grid_zones,
+        ...     value_columns="population",
+        ...     predicate="fractional",
+        ...     aggregation="sum",
+        ...     output_suffix="_pop"
+        ... )
+        >>> # Aggregate the count of landuse parcels intersecting each zone
+        >>> count_by_zone = aggregate_polygons_to_zones(
         ...     landuse_polygons,
         ...     grid_zones,
-        ...     value_columns=
-        ...
+        ...     value_columns="parcel_id",
+        ...     predicate="intersects",
+        ...     aggregation="count"
         ... )
     """
     # Input validation
```
```diff
@@ -900,6 +951,11 @@ def aggregate_polygons_to_zones(
     if zone_id_column not in zones.columns:
         raise ValueError(f"Zone ID column '{zone_id_column}' not found in zones")
 
+    if predicate not in ["intersects", "within", "fractional"]:
+        raise ValueError(
+            f"Unsupported predicate: {predicate}. Predicate can be one of `intersects`, `within`, `fractional`"
+        )
+
     # Convert polygons to GeoDataFrame if necessary
     if not isinstance(polygons, gpd.GeoDataFrame):
         try:
```
```diff
@@ -956,11 +1012,17 @@ def aggregate_polygons_to_zones(
     # Create a copy of the zones
     result = zones.copy()
 
-    if
+    if predicate == "fractional":
         # Use area-weighted aggregation with polygon overlay
         try:
             # Compute UTM CRS for accurate area calculations
-
+            try:
+                overlay_utm_crs = polygons_gdf.estimate_utm_crs()
+            except Exception as e:
+                LOGGER.warning(
+                    f"Warning: UTM CRS estimation failed, using Web Mercator. Error: {e}"
+                )
+                overlay_utm_crs = "EPSG:3857"  # Fallback to Web Mercator
 
             # Prepare polygons for overlay
             polygons_utm = polygons_gdf.to_crs(overlay_utm_crs)
```
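For readers unfamiliar with the "fractional" branch, the core of area-weighted aggregation can be sketched in a few lines of plain GeoPandas. This is a simplified illustration with a hypothetical helper name; the real function also handles the CRS fallback, multiple columns, and output suffixes:

```python
import geopandas as gpd
import pandas as pd


def fractional_sum(
    polygons: gpd.GeoDataFrame,
    zones: gpd.GeoDataFrame,
    value_column: str,
    zone_id_column: str = "zone_id",
) -> pd.Series:
    """Area-weighted sum of polygon values per zone (illustrative sketch)."""
    utm = polygons.estimate_utm_crs()
    polys = polygons.to_crs(utm)
    polys["_orig_area"] = polys.geometry.area

    # Overlay splits each polygon by the zones it crosses.
    pieces = gpd.overlay(polys, zones.to_crs(utm), how="intersection")

    # Each piece carries a share of the original value proportional to its area.
    weight = pieces.geometry.area / pieces["_orig_area"]
    pieces["_weighted"] = pieces[value_column] * weight
    return pieces.groupby(zone_id_column)["_weighted"].sum()
```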
```diff
@@ -1020,7 +1082,7 @@
     else:
         # Non-weighted aggregation - simpler approach
         # Perform spatial join
-        joined = gpd.sjoin(polygons_gdf, zones, how="inner", predicate=
+        joined = gpd.sjoin(polygons_gdf, zones, how="inner", predicate=predicate)
 
         # Remove geometry column for aggregation
         if "geometry" in joined.columns:
```
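As a closing note on the non-fractional branch: `gpd.sjoin` accepts the binary predicate directly, so "intersects" and "within" need no overlay at all. A minimal illustration with toy data and invented names:

```python
import geopandas as gpd
from shapely.geometry import box

zones = gpd.GeoDataFrame(
    {"zone_id": ["z1"]}, geometry=[box(0, 0, 10, 10)], crs="EPSG:3857"
)
polygons = gpd.GeoDataFrame(
    {"population": [100, 50]},
    geometry=[box(1, 1, 3, 3), box(8, 8, 12, 12)],  # the second straddles the zone edge
    crs="EPSG:3857",
)

# "intersects" keeps both polygons; "within" drops the straddling one.
for pred in ("intersects", "within"):
    joined = gpd.sjoin(polygons, zones, how="inner", predicate=pred)
    print(pred, joined.groupby("zone_id")["population"].sum().to_dict())
# intersects {'z1': 150}
# within {'z1': 100}
```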