ObjectNat 1.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. objectnat/__init__.py +9 -0
  2. objectnat/_api.py +14 -0
  3. objectnat/_config.py +43 -0
  4. objectnat/_version.py +1 -0
  5. objectnat/methods/__init__.py +0 -0
  6. objectnat/methods/coverage_zones/__init__.py +3 -0
  7. objectnat/methods/coverage_zones/graph_coverage.py +105 -0
  8. objectnat/methods/coverage_zones/radius_voronoi_coverage.py +39 -0
  9. objectnat/methods/coverage_zones/stepped_coverage.py +136 -0
  10. objectnat/methods/isochrones/__init__.py +1 -0
  11. objectnat/methods/isochrones/isochrone_utils.py +167 -0
  12. objectnat/methods/isochrones/isochrones.py +282 -0
  13. objectnat/methods/noise/__init__.py +3 -0
  14. objectnat/methods/noise/noise_init_data.py +10 -0
  15. objectnat/methods/noise/noise_reduce.py +155 -0
  16. objectnat/methods/noise/noise_simulation.py +453 -0
  17. objectnat/methods/noise/noise_simulation_simplified.py +222 -0
  18. objectnat/methods/point_clustering/__init__.py +1 -0
  19. objectnat/methods/point_clustering/cluster_points_in_polygons.py +115 -0
  20. objectnat/methods/provision/__init__.py +1 -0
  21. objectnat/methods/provision/provision.py +213 -0
  22. objectnat/methods/provision/provision_exceptions.py +59 -0
  23. objectnat/methods/provision/provision_model.py +323 -0
  24. objectnat/methods/utils/__init__.py +1 -0
  25. objectnat/methods/utils/geom_utils.py +173 -0
  26. objectnat/methods/utils/graph_utils.py +306 -0
  27. objectnat/methods/utils/math_utils.py +32 -0
  28. objectnat/methods/visibility/__init__.py +6 -0
  29. objectnat/methods/visibility/visibility_analysis.py +485 -0
  30. objectnat-1.3.3.dist-info/METADATA +202 -0
  31. objectnat-1.3.3.dist-info/RECORD +33 -0
  32. objectnat-1.3.3.dist-info/WHEEL +4 -0
  33. objectnat-1.3.3.dist-info/licenses/LICENSE.txt +28 -0
@@ -0,0 +1,115 @@
1
+ from typing import Literal
2
+
3
+ import geopandas as gpd
4
+ import pandas as pd
5
+ from sklearn.cluster import DBSCAN, HDBSCAN
6
+
7
+ from objectnat import config
8
+
9
+ logger = config.logger
10
+
11
+
12
def _get_cluster(services_select, min_dist, min_point, method):
    """Label each feature with a DBSCAN/HDBSCAN cluster id (noise gets -1).

    Clusters on representative points of the geometries, so it works for
    points and polygons alike. NOTE: mutates ``services_select`` in place by
    adding a ``cluster`` column, and returns the same object.
    """
    rep_points = services_select.geometry.representative_point()
    coords = pd.DataFrame({"x": rep_points.x, "y": rep_points.y}).to_numpy()
    if method == "DBSCAN":
        model = DBSCAN(eps=min_dist, min_samples=min_point)
    else:
        model = HDBSCAN(min_cluster_size=min_point, cluster_selection_epsilon=min_dist)
    services_select["cluster"] = model.fit(coords).labels_
    return services_select
22
+
23
+
24
+ def _get_service_ratio(loc, service_code_column):
25
+ all_services = loc.shape[0]
26
+ loc[service_code_column] = loc[service_code_column].astype(str)
27
+ services_count = loc.groupby(service_code_column).size()
28
+ return (services_count / all_services).round(2)
29
+
30
+
31
def get_clusters_polygon(
    points: gpd.GeoDataFrame,
    min_dist: float | int = 100,
    min_point: int = 5,
    method: Literal["DBSCAN", "HDBSCAN"] = "HDBSCAN",
    service_code_column: str = "service_code",
) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]:
    """
    Generate cluster polygons for given points based on a specified minimum distance and minimum points per cluster.
    Optionally, calculate the relative ratio between types of points within the clusters.

    Args:
        points (gpd.GeoDataFrame):
            GeoDataFrame containing the points to be clustered.
            Must include a 'service_code' column for service ratio calculations.
        min_dist (float | int, optional):
            Minimum distance between points to be considered part of the same cluster. Defaults to 100.
        min_point (int, optional):
            Minimum number of points required to form a cluster. Defaults to 5.
        method:
            The clustering method to use. Must be either "DBSCAN" or "HDBSCAN". Defaults to "HDBSCAN".
        service_code_column (str, optional):
            Column containing the service type, used for the relative ratio in clustered polygons.
            Defaults to "service_code".

    Returns:
        tuple[gpd.GeoDataFrame, gpd.GeoDataFrame]:
            A tuple containing the clustered polygons GeoDataFrame and the original points GeoDataFrame with cluster labels.
    """
    if method not in ["DBSCAN", "HDBSCAN"]:
        raise ValueError("Method must be either 'DBSCAN' or 'HDBSCAN'")
    original_crs = points.crs
    # Project to a local UTM CRS so min_dist is interpreted in meters.
    local_crs = points.estimate_utm_crs()
    points = points.to_crs(local_crs)
    # _get_cluster adds a "cluster" column in place and returns the same
    # object, so services_select and points alias the same GeoDataFrame.
    services_select = _get_cluster(points, min_dist, min_point, method)

    if service_code_column not in points.columns:
        logger.warning(
            f"No {service_code_column} column in provided GeoDataFrame, cluster polygons will be without relative ratio"
        )
        # Fall back to a single dummy service type so the ratio math still runs.
        points[service_code_column] = service_code_column

    # Label -1 is the clustering algorithm's "noise" marker: treat as outliers.
    points_normal = services_select[services_select["cluster"] != -1].copy()
    points_outlier = services_select[services_select["cluster"] == -1].copy()

    if len(points_normal) > 0:
        cluster_service = points_normal.groupby("cluster", group_keys=True).apply(
            _get_service_ratio, service_code_column=service_code_column
        )
        # apply() may return a MultiIndex Series; reshape to cluster x service table.
        if isinstance(cluster_service, pd.Series):
            cluster_service = cluster_service.unstack(level=1, fill_value=0)

        # One concave hull per cluster (ratio=0.1 hugs the points fairly tightly).
        polygons_normal = points_normal.dissolve("cluster").concave_hull(ratio=0.1, allow_holes=True)
        df_clusters_normal = pd.concat([cluster_service, polygons_normal.rename("geometry")], axis=1)
        # Highest cluster id so far; outlier ids are numbered after it.
        cluster_normal = df_clusters_normal.index.max()
        points_normal["outlier"] = False
        df_clusters_normal["outlier"] = False
    else:
        df_clusters_normal = None
        cluster_normal = 0

    if len(points_outlier) > 0:
        # Each outlier point becomes its own single-member "cluster".
        clusters_outlier = cluster_normal + 1
        new_clusters = list(range(clusters_outlier, clusters_outlier + len(points_outlier)))
        points_outlier.loc[:, "cluster"] = new_clusters

        cluster_service = points_outlier.groupby("cluster", group_keys=True).apply(
            _get_service_ratio, service_code_column=service_code_column
        )
        if isinstance(cluster_service, pd.Series):
            cluster_service = cluster_service.unstack(level=1, fill_value=0)

        df_clusters_outlier = cluster_service.join(points_outlier.set_index("cluster")["geometry"])
        points_outlier["outlier"] = True
        df_clusters_outlier["outlier"] = True
    else:
        points_outlier = None
        df_clusters_outlier = None

    # pd.concat silently skips None entries, so either branch may be missing.
    df_clusters = pd.concat([df_clusters_normal, df_clusters_outlier]).fillna(0).set_geometry("geometry")
    # Buffer by half the clustering distance to give polygons visible extent
    # (also turns single outlier points into small discs).
    df_clusters["geometry"] = df_clusters["geometry"].buffer(min_dist / 2)
    df_clusters = df_clusters.reset_index().rename(columns={"index": "cluster"})

    points = pd.concat([points_normal, points_outlier])

    return df_clusters.to_crs(original_crs), points.to_crs(original_crs)
@@ -0,0 +1 @@
1
+ from .provision import clip_provision, get_service_provision, recalculate_links
@@ -0,0 +1,213 @@
1
+ import geopandas as gpd
2
+ import numpy as np
3
+ import pandas as pd
4
+
5
+ from objectnat import config
6
+
7
+ from .provision_model import Provision
8
+
9
+ logger = config.logger
10
+
11
+
12
def get_service_provision(
    buildings: gpd.GeoDataFrame,
    adjacency_matrix: pd.DataFrame,
    services: gpd.GeoDataFrame,
    threshold: int,
    buildings_demand_column: str = "demand",
    services_capacity_column: str = "capacity",
) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
    """
    Compute service provision between demand points (buildings) and service facilities.

    A gravity-based allocation model distributes each service's capacity over
    reachable buildings, with weights decaying with the square of the OD cost,
    so nearer buildings get proportionally larger shares.

    Args:
        buildings (gpd.GeoDataFrame):
            Demand locations (e.g. residential buildings) with a numeric demand
            column named by ``buildings_demand_column``.
        adjacency_matrix (pd.DataFrame):
            OD cost matrix: rows indexed like ``buildings``, columns indexed
            like ``services``. Units must match ``threshold`` (minutes, meters, ...).
            ``NaN``/``inf`` entries are treated as unreachable.
        services (gpd.GeoDataFrame):
            Service facilities with a numeric capacity column named by
            ``services_capacity_column``.
        threshold (int):
            Maximum admissible OD cost; larger entries count as unreachable.
        buildings_demand_column (str):
            Name of the demand column. Defaults to ``"demand"``.
        services_capacity_column (str):
            Name of the capacity column. Defaults to ``"capacity"``.

    Returns:
        tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
            ``(buildings, services, links)`` — buildings with provision metrics,
            services with load/capacity metrics, and the building–service links
            (allocated demand shares and costs) within the threshold.

    Notes:
        - No routing is performed; a precomputed OD matrix is expected.
    """
    # Work on copies so the caller's frames are never mutated.
    demand_gdf = buildings.copy()
    capacity_gdf = services.copy()
    # Normalize column names to what the Provision model expects.
    demand_gdf["demand"] = demand_gdf[buildings_demand_column]
    capacity_gdf["capacity"] = capacity_gdf[services_capacity_column]

    model = Provision(
        services=capacity_gdf,
        demanded_buildings=demand_gdf,
        adjacency_matrix=adjacency_matrix.copy(),
        threshold=threshold,
    )
    return model.run()
80
+
81
+
82
def clip_provision(
    buildings: gpd.GeoDataFrame, services: gpd.GeoDataFrame, links: gpd.GeoDataFrame, selection_zone: gpd.GeoDataFrame
) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
    """
    Clip service provision results to a specific geographic boundary.

    Keeps only:
      * buildings that intersect ``selection_zone``;
      * links that connect to the kept buildings;
      * services referenced by those links.

    Args:
        buildings:
            GeoDataFrame of buildings **after** :func:`get_service_provision`.
        services:
            GeoDataFrame of services **after** :func:`get_service_provision`.
        links:
            GeoDataFrame of building–service links from
            :func:`get_service_provision`; must carry ``building_index`` and
            ``service_index`` columns.
        selection_zone:
            GeoDataFrame (polygon or multipolygon) defining the clipping area.

    Returns:
        Tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
            The filtered subsets of buildings, services, and links.

    Raises:
        ValueError: if the four inputs do not share the same CRS.

    Notes:
        - Spatial filtering only: demand/supply are **not** recomputed.
    """
    # Explicit check instead of `assert`: asserts are stripped under `python -O`,
    # which would silently disable CRS validation.
    if not (selection_zone.crs == buildings.crs == services.crs == links.crs):
        raise ValueError(
            f"CRS mismatch: buildings_crs:{buildings.crs}, "
            f"links_crs:{links.crs} , "
            f"services_crs:{services.crs}, "
            f"selection_zone_crs:{selection_zone.crs}"
        )
    buildings = buildings.copy()
    links = links.copy()
    services = services.copy()

    # Buildings that touch the selection zone (zone merged into one geometry).
    in_zone = buildings.intersects(selection_zone.union_all())
    buildings = buildings[in_zone]
    # Links attached to a kept building.
    links = links[links["building_index"].isin(buildings.index)]
    # Services still referenced by at least one remaining link.
    services_to_keep = set(links["service_index"])
    services = services[services.index.isin(services_to_keep)]
    return buildings, services, links
130
+
131
+
132
def recalculate_links(
    buildings: gpd.GeoDataFrame, services: gpd.GeoDataFrame, links: gpd.GeoDataFrame, new_max_dist: float
) -> tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
    """
    Recalculate provision aggregates after tightening the accessibility threshold.

    Drops every link whose cost exceeds ``new_max_dist`` and updates the demand
    and capacity aggregates of the affected buildings and services. Removed
    demand is **not** redistributed to other services.

    Args:
        buildings:
            Buildings after :func:`get_service_provision`, with demand,
            demand_left, supplied-demand and average-cost fields.
        services:
            Services after :func:`get_service_provision`, with remaining
            capacity and load fields.
        links:
            Building–service links with at least ``building_index``,
            ``service_index``, ``distance`` and ``demand`` columns.
        new_max_dist:
            New maximum allowed cost (same units as the OD matrix); links with
            a strictly greater cost are removed.

    Returns:
        tuple[gpd.GeoDataFrame, gpd.GeoDataFrame, gpd.GeoDataFrame]:
            Updated buildings, updated services, and the surviving links.

    Notes:
        - If no link exceeds ``new_max_dist`` a warning is logged and the
          inputs are returned unchanged (as copies).
        - ``avg_dist`` becomes ``NaN`` for buildings with no served demand left.
    """
    buildings = buildings.copy()
    services = services.copy()
    links = links.copy()

    dropped = links[links["distance"] > new_max_dist]
    if dropped.empty:
        logger.warning("To clip distance exceeds max links distance, returning full provision")
        return buildings, services, links

    kept = links[links["distance"] <= new_max_dist]

    # Per building: total demand freed and total (demand * distance) removed.
    freed = (
        dropped.assign(weighted_dist=dropped["demand"] * dropped["distance"])
        .groupby("building_index")
        .agg(demand=("demand", "sum"), distance=("weighted_dist", "sum"))
        .reindex(buildings.index, fill_value=0)
    )

    # Total cost before the cut, computed from the old averages.
    served_before = buildings["supplied_demands_within"] + buildings["supplied_demands_without"]
    remaining_cost = served_before * buildings["avg_dist"] - freed["distance"]

    buildings["demand_left"] += freed["demand"]
    buildings["supplied_demands_without"] -= freed["demand"]
    served_after = buildings["supplied_demands_without"] + buildings["supplied_demands_within"]
    buildings["avg_dist"] = remaining_cost / served_after
    # NaN out buildings whose demand is now entirely unserved; round the rest.
    buildings["avg_dist"] = buildings.apply(
        lambda row: np.nan if (row["demand"] == row["demand_left"]) else round(row["avg_dist"], 2), axis=1
    )

    # Per service: capacity released by the removed links.
    freed_capacity = dropped.groupby("service_index")["demand"].sum().reindex(services.index, fill_value=0)
    services["capacity_left"] += freed_capacity
    services["carried_capacity_without"] -= freed_capacity
    services["service_load"] -= freed_capacity

    return buildings, services, kept
@@ -0,0 +1,59 @@
1
class CapacityKeyError(KeyError):
    """Raised when the 'capacity' column is missing from the 'services' GeoDataFrame."""

    def __init__(self, *args):
        # Forward args to KeyError so e.args is populated — keeps repr(),
        # pickling round-trips and exception introspection working.
        super().__init__(*args)
        # Optional custom message; falls back to the default text in __str__.
        self.message = args[0] if args else None

    def __str__(self):
        if self.message:
            return f"CapacityKeyError, {self.message} "

        return (
            "Column 'capacity' was not found in provided 'services' GeoDataFrame. This attribute "
            "corresponds to the total capacity for each service."
        )
16
+
17
+
18
class CapacityValueError(ValueError):
    """Raised when the 'capacity' column of the 'services' GeoDataFrame holds no valid value."""

    def __init__(self, *args):
        # Forward args to ValueError so e.args is populated — keeps repr(),
        # pickling round-trips and exception introspection working.
        super().__init__(*args)
        # Optional custom message; falls back to the default text in __str__.
        self.message = args[0] if args else None

    def __str__(self):
        if self.message:
            return f"CapacityValueError, {self.message} "

        return "Column 'capacity' in 'services' GeoDataFrame has no valid value."
30
+
31
+
32
class DemandKeyError(KeyError):
    """Raised when the 'demand' column is missing from the 'demanded_buildings' GeoDataFrame."""

    def __init__(self, *args):
        # Forward args to KeyError so e.args is populated — keeps repr(),
        # pickling round-trips and exception introspection working.
        super().__init__(*args)
        # Optional custom message; falls back to the default text in __str__.
        self.message = args[0] if args else None

    def __str__(self):
        if self.message:
            return f"DemandKeyError, {self.message} "

        return (
            "The column 'demand' was not found in the provided 'demanded_buildings' GeoDataFrame. "
            "This attribute corresponds to the number of demands for the selected service in each building."
        )
47
+
48
+
49
class DemandValueError(ValueError):
    """Raised when the 'demand' column of the 'demanded_buildings' GeoDataFrame holds no valid value."""

    def __init__(self, *args):
        # Forward args to ValueError so e.args is populated — keeps repr(),
        # pickling round-trips and exception introspection working.
        super().__init__(*args)
        # Optional custom message; falls back to the default text in __str__.
        self.message = args[0] if args else None

    def __str__(self):
        if self.message:
            return f"DemandValueError, {self.message} "
        return "Column 'demand' in 'demanded_buildings' GeoDataFrame has no valid value."