objectnat-1.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- objectnat/__init__.py +9 -0
- objectnat/_api.py +14 -0
- objectnat/_config.py +43 -0
- objectnat/_version.py +1 -0
- objectnat/methods/__init__.py +0 -0
- objectnat/methods/coverage_zones/__init__.py +3 -0
- objectnat/methods/coverage_zones/graph_coverage.py +105 -0
- objectnat/methods/coverage_zones/radius_voronoi_coverage.py +39 -0
- objectnat/methods/coverage_zones/stepped_coverage.py +136 -0
- objectnat/methods/isochrones/__init__.py +1 -0
- objectnat/methods/isochrones/isochrone_utils.py +167 -0
- objectnat/methods/isochrones/isochrones.py +282 -0
- objectnat/methods/noise/__init__.py +3 -0
- objectnat/methods/noise/noise_init_data.py +10 -0
- objectnat/methods/noise/noise_reduce.py +155 -0
- objectnat/methods/noise/noise_simulation.py +453 -0
- objectnat/methods/noise/noise_simulation_simplified.py +222 -0
- objectnat/methods/point_clustering/__init__.py +1 -0
- objectnat/methods/point_clustering/cluster_points_in_polygons.py +115 -0
- objectnat/methods/provision/__init__.py +1 -0
- objectnat/methods/provision/provision.py +213 -0
- objectnat/methods/provision/provision_exceptions.py +59 -0
- objectnat/methods/provision/provision_model.py +323 -0
- objectnat/methods/utils/__init__.py +1 -0
- objectnat/methods/utils/geom_utils.py +173 -0
- objectnat/methods/utils/graph_utils.py +306 -0
- objectnat/methods/utils/math_utils.py +32 -0
- objectnat/methods/visibility/__init__.py +6 -0
- objectnat/methods/visibility/visibility_analysis.py +485 -0
- objectnat-1.3.3.dist-info/METADATA +202 -0
- objectnat-1.3.3.dist-info/RECORD +33 -0
- objectnat-1.3.3.dist-info/WHEEL +4 -0
- objectnat-1.3.3.dist-info/licenses/LICENSE.txt +28 -0
objectnat/methods/noise/noise_simulation.py
@@ -0,0 +1,453 @@
+import concurrent.futures
+import math
+import multiprocessing
+import time
+
+import geopandas as gpd
+import pandas as pd
+from shapely import GEOSException
+from shapely.geometry import GeometryCollection, MultiPolygon, Point, Polygon
+from shapely.ops import polygonize, unary_union
+from tqdm import tqdm
+
+from objectnat import config
+from objectnat.methods.noise.noise_reduce import dist_to_target_db, green_noise_reduce_db
+from objectnat.methods.noise.noise_simulation_simplified import _eval_donuts_gdf
+from objectnat.methods.utils.geom_utils import (
+    gdf_to_circle_zones_from_point,
+    get_point_from_a_thorough_b,
+    polygons_to_multilinestring,
+)
+from objectnat.methods.visibility.visibility_analysis import get_visibility_accurate
+
+logger = config.logger
+
+MAX_DB_VALUE = 194
+
+
+def simulate_noise(
+    source_points: gpd.GeoDataFrame,
+    obstacles: gpd.GeoDataFrame,
+    source_noise_db: float = None,
+    geometric_mean_freq_hz: float = None,
+    **kwargs,
+):
+    """
+    Simulates noise propagation from a set of source points considering obstacles, trees, and environmental factors.
+
+    Args:
+        source_points (gpd.GeoDataFrame):
+            A GeoDataFrame with one or more point geometries representing noise sources.
+            Optionally, it can include 'source_noise_db' and 'geometric_mean_freq_hz' columns for per-point simulation.
+        obstacles (gpd.GeoDataFrame):
+            A GeoDataFrame representing obstacles in the environment. If a column with sound absorption coefficients
+            is present, its name should be provided in the `absorb_ratio_column` argument.
+            Missing values will be filled with the `standart_absorb_ratio`.
+        source_noise_db (float, optional):
+            Default noise level (dB) to use if not specified per-point. Decibels are logarithmic units used to measure
+            sound intensity. A value of 20 dB represents a barely audible whisper, while 140 dB is comparable to the
+            noise of jet engines.
+        geometric_mean_freq_hz (float, optional):
+            Default frequency (Hz) to use if not specified per-point. This parameter influences the sound wave's
+            propagation and scattering in the presence of trees. Lower frequencies travel longer distances than higher
+            frequencies. It's recommended to use values between 63 Hz and 8000 Hz; values outside this range will be
+            clamped to the nearest boundary for the sound absorption coefficient calculation.
+
+    Keyword Args:
+        absorb_ratio_column (str, optional): The name of the column in the `obstacles` GeoDataFrame that contains the
+            sound absorption coefficients for each obstacle. Default is None. If not specified, all obstacles will have
+            the `standart_absorb_ratio`.
+        standart_absorb_ratio (float, optional): The default sound absorption coefficient to use for obstacles without
+            specified values in the `absorb_ratio_column`. Default is 0.05, which is a typical value for concrete walls.
+        trees (gpd.GeoDataFrame, optional): A GeoDataFrame containing trees or dense vegetation along the sound wave's
+            path. Trees will scatter and absorb sound waves.
+        tree_resolution (int, optional): A resolution parameter for simulating tree interactions with sound waves.
+            Recommended values are between 2 and 16, with higher values providing more accurate simulation results.
+        air_temperature (float, optional): The air temperature in degrees Celsius. The recommended range is from 0 to
+            30 degrees Celsius, as temperatures outside this range will be clipped. Temperature affects the sound
+            propagation in the air.
+        target_noise_db (float, optional): The target noise level (in dB) for the simulation. Default is 40 dB.
+            Lower values may not be relevant for further analysis, as they are near the threshold of human hearing.
+        db_sim_step (float, optional): The step size in decibels for the noise simulation. Default is 1. For more
+            precise analysis, this can be adjusted. If the difference between `source_noise_db` and `target_noise_db`
+            is not divisible by the step size, the function will raise an error.
+        reflection_n (int, optional): The maximum number of reflections (bounces) to simulate for each sound wave.
+            Recommended values are between 1 and 3. Larger values will result in longer simulation times.
+        dead_area_r (float, optional): A debugging parameter that defines the radius of the "dead zone" for reflections.
+            Points within this area will not generate reflections. This is useful to prevent the algorithm from getting
+            stuck in corners or along building walls.
+        use_parallel (bool, optional): Whether to use ProcessPool for task distribution or not. Default is True.
+
+    Returns:
+        gpd.GeoDataFrame: A GeoDataFrame containing the noise simulation results, including noise levels and geometries
+            of the affected areas. Each point's simulation results will be merged into a single GeoDataFrame.
+    """
+    # Obstacles args
+    absorb_ratio_column = kwargs.get("absorb_ratio_column", None)
+    standart_absorb_ratio = kwargs.get("standart_absorb_ratio", 0.05)
+
+    # Trees args
+    trees = kwargs.get("trees", None)
+    tree_res = kwargs.get("tree_resolution", 4)
+
+    # Simulation conditions
+    air_temperature = kwargs.get("air_temperature", 20)
+    target_noise_db = kwargs.get("target_noise_db", 40)
+
+    # Simulation params
+    db_sim_step = kwargs.get("db_sim_step", 1)
+    reflection_n = kwargs.get("reflection_n", 3)
+    dead_area_r = kwargs.get("dead_area_r", 5)
+
+    # Use paralleling
+    use_parallel = kwargs.get("use_parallel", True)
+
+    # Validate optional columns or default values
+    use_column_db = False
+    if "source_noise_db" in source_points.columns:
+        if (source_points["source_noise_db"] > MAX_DB_VALUE).any():
+            raise ValueError(
+                f"One or more values in 'source_noise_db' column exceed the physical limit of {MAX_DB_VALUE} dB."
+            )
+        if source_points["source_noise_db"].isnull().any():
+            raise ValueError(f"Column 'source_noise_db' contains missing (NaN) values")
+        use_column_db = True
+
+    use_column_freq = False
+    if "geometric_mean_freq_hz" in source_points.columns:
+        if source_points["geometric_mean_freq_hz"].isnull().any():
+            raise ValueError(f"Column 'geometric_mean_freq_hz' contains missing (NaN) values")
+        use_column_freq = True
+
+    if not use_column_db:
+        if source_noise_db is None:
+            raise ValueError(
+                "Either `source_noise_db` must be provided or the `source_points` must contain a 'source_noise_db' column."
+            )
+        if source_noise_db > MAX_DB_VALUE:
+            raise ValueError(
+                f"source_noise_db ({source_noise_db} dB) exceeds the physical limit of {MAX_DB_VALUE} dB in air."
+            )
+
+    if not use_column_freq:
+        if geometric_mean_freq_hz is None:
+            raise ValueError(
+                "Either `geometric_mean_freq_hz` must be provided or the `source_points` must contain a 'geometric_mean_freq_hz' column."
+            )
+    if not use_column_db and not use_column_freq and len(source_points) > 1:
+        logger.warning(
+            "`source_noise_db` and `geometric_mean_freq_hz` will be used for all points. Per-point simulation parameters not found."
+        )
+
+    original_crs = source_points.crs
+    source_points = source_points.copy()
+
+    source_points = source_points.copy()
+    if len(obstacles) > 0:
+        obstacles = obstacles.copy()
+        local_crs = obstacles.estimate_utm_crs()
+        obstacles.to_crs(local_crs, inplace=True)
+        obstacles.geometry = obstacles.geometry.simplify(tolerance=1)
+        source_points.to_crs(local_crs, inplace=True)
+    else:
+        local_crs = source_points.estimate_utm_crs()
+        source_points.to_crs(local_crs, inplace=True)
+    source_points.reset_index(drop=True)
+    source_points.geometry = source_points.centroid
+
+    # Simplifying trees
+    if trees is not None:
+        trees = trees.copy()
+        trees.to_crs(local_crs, inplace=True)
+        trees.geometry = trees.geometry.simplify(tolerance=1)
+    else:
+        trees = gpd.GeoDataFrame()
+
+    if absorb_ratio_column is None:
+        obstacles["absorb_ratio"] = standart_absorb_ratio
+    else:
+        obstacles["absorb_ratio"] = obstacles[absorb_ratio_column].fillna(standart_absorb_ratio)
+    obstacles = obstacles[["absorb_ratio", "geometry"]]
+
+    # creating initial task and simulating for each point
+    task_queue = multiprocessing.Queue()
+    dead_area_dict = {}
+    for ind, row in source_points.iterrows():
+        source_point = row.geometry
+        local_db = row["source_noise_db"] if use_column_db else source_noise_db
+        local_freq = row["geometric_mean_freq_hz"] if use_column_freq else geometric_mean_freq_hz
+
+        # calculating layer dist and db values
+        dist_db = [(0, local_db)]
+        cur_db = local_db - db_sim_step
+        while cur_db > target_noise_db - db_sim_step:
+            if cur_db - db_sim_step < target_noise_db:
+                cur_db = target_noise_db
+            max_dist = dist_to_target_db(local_db, cur_db, local_freq, air_temperature)
+            dist_db.append((max_dist, cur_db))
+            cur_db -= db_sim_step
+
+        args = (source_point, obstacles, trees, 0, 0, dist_db)
+        kwargs = {
+            "reflection_n": reflection_n,
+            "geometric_mean_freq_hz": local_freq,
+            "tree_res": tree_res,
+            "min_db": target_noise_db,
+            "simulation_ind": ind,
+        }
+        task_queue.put((_noise_from_point_task, args, kwargs))
+        dead_area_dict[ind] = source_point.buffer(dead_area_r, resolution=2)
+
+    noise_gdf = _recursive_simulation_queue(
+        task_queue, dead_area_dict=dead_area_dict, dead_area_r=dead_area_r, use_parallel=use_parallel
+    )
+
+    noise_gdf = gpd.GeoDataFrame(pd.concat(noise_gdf, ignore_index=True), crs=local_crs)
+    polygons = gpd.GeoDataFrame(
+        geometry=list(polygonize(noise_gdf.geometry.apply(polygons_to_multilinestring).union_all())), crs=local_crs
+    )
+    polygons_points = polygons.copy()
+    polygons_points.geometry = polygons.representative_point()
+    sim_result = polygons_points.sjoin(noise_gdf, predicate="within").reset_index()
+    sim_result = sim_result.groupby("index").agg({"noise_level": "max"})
+    sim_result["geometry"] = polygons
+    sim_result = (
+        gpd.GeoDataFrame(sim_result, geometry="geometry", crs=local_crs).dissolve(by="noise_level").reset_index()
+    )
+
+    return sim_result.to_crs(original_crs)
+
+
+def _noise_from_point_task(task, **kwargs) -> tuple[gpd.GeoDataFrame, list[tuple] | None]:
+    # Unpacking task
+    point_from, obstacles, trees_orig, passed_dist, deep, dist_db = task
+
+    def donuts_dist_values(dist_db, passed_dist, max_view_dist):
+        new_dist_db = dist_db + [(passed_dist, None), (max_view_dist + passed_dist, None)]
+        new_dist_db = sorted(new_dist_db, key=lambda x: x[0])
+        start = None
+        end = None
+        for i, (dist, db) in enumerate(new_dist_db[:-1]):
+            if db is None:
+                if start is None:
+                    new_dist_db[i] = (dist, new_dist_db[i - 1][1])
+                    start = i
+                else:
+                    new_dist_db[i] = (dist, new_dist_db[i + 1][1])
+                    end = i + 1
+                    break
+        return [(dist - passed_dist, db) for dist, db in new_dist_db[start:end]]
+
+    max_dist = max(dist_db, key=lambda x: x[0])[0]
+    min_db = kwargs.get("min_db")
+    reflection_n = kwargs.get("reflection_n")
+    geometric_mean_freq_hz = kwargs.get("geometric_mean_freq_hz")
+    tree_res = kwargs.get("tree_res")
+    local_crs = obstacles.crs
+    dist = round(max_dist - passed_dist, 1)
+
+    obstacles = obstacles[obstacles.intersects(point_from.buffer(dist, resolution=8))]
+
+    if len(obstacles) == 0:
+        obstacles_union = Polygon()
+    else:
+        obstacles_union = obstacles.union_all()
+
+    vis_poly, max_view_dist = get_visibility_accurate(point_from, obstacles, dist, return_max_view_dist=True)
+
+    donuts_dist_values = donuts_dist_values(dist_db, passed_dist, max_view_dist)
+
+    allowed_geom_types = ["MultiPolygon", "Polygon"]
+
+    # Trees noise reduce
+    reduce_polygons = []
+    if len(trees_orig) > 0:
+        trees_orig = trees_orig[trees_orig.intersects(point_from.buffer(dist, resolution=8))]
+        if len(trees_orig) > 0:
+            try:
+                trees = gdf_to_circle_zones_from_point(trees_orig, point_from, dist, resolution=tree_res)
+                trees = trees.clip(vis_poly, keep_geom_type=True).explode(index_parts=False)
+            except TypeError:
+                trees = gpd.GeoDataFrame()
+
+            for _, row in trees.iterrows():
+                tree_geom = row.geometry
+                if tree_geom.area < 1:
+                    continue
+                dist_to_centroid = tree_geom.centroid.distance(point_from)
+
+                points_with_angle = [
+                    (
+                        Point(pt),
+                        round(abs(math.atan2(pt[1] - point_from.y, pt[0] - point_from.x)), 5),
+                        Point(pt).distance(point_from),
+                    )
+                    for pt in tree_geom.exterior.coords
+                ]
+
+                p0_1 = max(points_with_angle, key=lambda x: (x[1], x[2]))
+                p0_2 = min(points_with_angle, key=lambda x: (x[1], -x[2]))
+                delta_angle = 2 * math.pi + p0_1[1] - p0_2[1]
+                if delta_angle > math.pi:
+                    delta_angle = 2 * math.pi - delta_angle
+
+                a = math.sqrt((dist**2) * (1 + (math.tan(delta_angle / 2) ** 2)))
+                p1 = get_point_from_a_thorough_b(point_from, p0_1[0], a)
+                p2 = get_point_from_a_thorough_b(point_from, p0_2[0], a)
+                red_polygon = unary_union([Polygon([p0_1[0], p1, p2, p0_2[0]]).intersection(vis_poly), tree_geom])
+                if isinstance(red_polygon, GeometryCollection):
+                    red_polygon = max(
+                        ((poly, poly.area) for poly in red_polygon.geoms if isinstance(poly, (MultiPolygon, Polygon))),
+                        key=lambda x: x[1],
+                    )[0]
+                if isinstance(red_polygon, MultiPolygon):
+                    red_polygon = red_polygon.buffer(0.1, resolution=1).buffer(-0.1, resolution=1)
+                    if isinstance(red_polygon, MultiPolygon):
+                        red_polygon = max(((poly, poly.area) for poly in red_polygon.geoms), key=lambda x: x[1])[0]
+                if isinstance(red_polygon, Polygon) and not red_polygon.is_empty:
+                    red_polygon = Polygon(red_polygon.exterior)
+                    r_tree_new = round(
+                        tree_geom.area / (2 * dist_to_centroid * math.sin(abs(p0_1[1] - p0_2[1]) / 2)), 2
+                    )
+
+                    noise_reduce = int(round(green_noise_reduce_db(geometric_mean_freq_hz, r_tree_new)))
+                    reduce_polygons.append((red_polygon, noise_reduce))
+
+    noise_from_point = _eval_donuts_gdf(point_from, donuts_dist_values, local_crs, vis_poly)
+    # intersect noise poly with noise reduce
+    if len(reduce_polygons) > 0:
+        reduce_polygons = gpd.GeoDataFrame(
+            reduce_polygons, columns=["geometry", "reduce"], geometry="geometry", crs=local_crs
+        )
+
+        all_lines = (
+            reduce_polygons.geometry.apply(polygons_to_multilinestring).tolist()
+            + noise_from_point.geometry.apply(polygons_to_multilinestring).tolist()
+        )
+
+        cutted_polygons = gpd.GeoDataFrame(geometry=list(polygonize(unary_union(all_lines))), crs=local_crs)
+
+        cutted_polygons_points = cutted_polygons.copy()
+        cutted_polygons_points.geometry = cutted_polygons.representative_point()
+
+        joined = (
+            cutted_polygons_points.sjoin(noise_from_point, predicate="within", how="left")
+            .drop(columns="index_right")
+            .sjoin(reduce_polygons, predicate="within", how="left")
+            .drop(columns="index_right")
+        )
+        joined.geometry = cutted_polygons.geometry
+        joined = (
+            joined.reset_index().groupby("index").agg({"geometry": "first", "reduce": "sum", "noise_level": "first"})
+        )
+        joined = gpd.GeoDataFrame(joined, geometry="geometry", crs=local_crs)
+        noise_from_point = joined.copy()
+
+        noise_from_point = noise_from_point.dropna(subset=["noise_level"])
+
+        noise_from_point["reduce"] = noise_from_point["reduce"].fillna(0)
+        noise_from_point["noise_level"] = noise_from_point["noise_level"] - noise_from_point["reduce"]
+    else:
+        noise_from_point["reduce"] = 0
+    noise_from_point = noise_from_point[noise_from_point.geom_type.isin(allowed_geom_types)]
+    noise_from_point = noise_from_point[noise_from_point["noise_level"] >= min_db]
+    if deep == reflection_n:
+        return noise_from_point, None
+
+    if isinstance(vis_poly, Polygon):
+        vis_poly_points = [Point(coords) for coords in vis_poly.exterior.coords]
+    else:
+        vis_poly_points = [Point(coords) for geom in vis_poly.geoms for coords in geom.exterior.coords]
+    vis_poly_points = gpd.GeoDataFrame(geometry=vis_poly_points, crs=local_crs)
+
+    # Generating reflection points
+    vis_poly_points["point"] = vis_poly_points["geometry"].copy()
+    vis_poly_points.geometry = vis_poly_points.geometry.buffer(1, resolution=1)
+    vis_poly_points = vis_poly_points.sjoin(obstacles, predicate="intersects").drop(columns="index_right")
+    vis_poly_points = vis_poly_points[~vis_poly_points.index.duplicated(keep="first")]
+    vis_poly_points.dropna(subset=["absorb_ratio"], inplace=True)
+    nearby_poly = point_from.buffer(1.1, resolution=2)
+    try:
+        vis_poly_points.geometry = (
+            vis_poly_points.difference(vis_poly).difference(obstacles_union).difference(nearby_poly)
+        )
+    except GEOSException:
+        return noise_from_point, None
+    vis_poly_points = vis_poly_points[~vis_poly_points.is_empty]
+    vis_poly_points = vis_poly_points[vis_poly_points.area >= 0.01]
+    vis_poly_points["geometry"] = vis_poly_points["point"]
+    vis_poly_points["dist"] = vis_poly_points.distance(point_from)
+    vis_poly_points = vis_poly_points[vis_poly_points["dist"] < max_dist - 5]
+    vis_poly_points = vis_poly_points.sjoin(noise_from_point, predicate="intersects", how="left")
+
+    if len(vis_poly_points) == 0:
+        return noise_from_point, None
+
+    new_obs = pd.concat([obstacles, gpd.GeoDataFrame(geometry=[vis_poly], crs=local_crs)], ignore_index=True)
+
+    # Creating new reflection tasks
+    new_tasks = []
+    for _, loc in vis_poly_points.iterrows():
+        if not isinstance(loc.geometry, Point):
+            continue
+        new_passed_dist = round(loc.dist + passed_dist, 2)
+        dist_last = max_dist - new_passed_dist
+        if dist_last > 1:
+            db_change = loc["reduce"]
+            dist_change = loc["absorb_ratio"] * dist_last
+            new_dist_db = [(dist - dist_change, db - db_change) for dist, db in dist_db]
+            task_obs = new_obs.copy()
+            task_obs.geometry = task_obs.difference(loc.geometry.buffer(1, resolution=1))
+            new_tasks.append(
+                (
+                    _noise_from_point_task,
+                    (loc.geometry, task_obs, trees_orig, new_passed_dist, deep + 1, new_dist_db),
+                    kwargs,
+                )
+            )
+
+    return noise_from_point, new_tasks


+def _recursive_simulation_queue(
+    task_queue: multiprocessing.Queue, dead_area_dict: dict, dead_area_r: int, use_parallel: bool
+):
+    results = []
+    total_tasks = task_queue.qsize()
+
+    with tqdm(total=total_tasks, desc="Simulating noise") as pbar:
+        if use_parallel:
+            executor_class = concurrent.futures.ProcessPoolExecutor()
+        else:
+            executor_class = concurrent.futures.ThreadPoolExecutor()
+        with executor_class as executor:
+            future_to_task = {}
+            while True:
+                while not task_queue.empty() and len(future_to_task) < executor._max_workers:
+                    func, task, kwargs = task_queue.get_nowait()
+                    future = executor.submit(func, task, **kwargs)
+                    future_to_task[future] = kwargs["simulation_ind"]
+                done, _ = concurrent.futures.wait(future_to_task.keys(), return_when=concurrent.futures.FIRST_COMPLETED)
+                for future in done:
+                    simulation_ind = future_to_task.pop(future)
+                    result, new_tasks = future.result()
+                    if new_tasks:
+                        new_tasks_n = 0
+                        local_dead_area = dead_area_dict.get(simulation_ind)
+                        new_dead_area_points = [local_dead_area]
+                        for func, new_task, new_kwargs in new_tasks:
+                            new_point = new_task[0]
+                            if not local_dead_area.covers(new_point):
+                                task_queue.put((func, new_task, new_kwargs))
+                                new_dead_area_points.append(new_point.buffer(dead_area_r, resolution=2))
+                                new_tasks_n += 1
+                        dead_area_dict[simulation_ind] = unary_union(new_dead_area_points)
+                        total_tasks += new_tasks_n
+                        pbar.total = total_tasks
+                        pbar.refresh()
+                    results.append(result)
+                    pbar.update(1)
+                time.sleep(0.01)
+                if not future_to_task and task_queue.empty():
+                    break
+    return results
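For readers scanning this diff, the snippet below is not part of the package; it is a minimal usage sketch of `simulate_noise`, based only on the signature and docstring above. The coordinates, noise level, and frequency are illustrative assumptions, and the import uses the module path shown in this diff.

import geopandas as gpd
from shapely.geometry import Point, Polygon

from objectnat.methods.noise.noise_simulation import simulate_noise

# Hypothetical input data: one noise source point and one building footprint (WGS84).
source = gpd.GeoDataFrame(geometry=[Point(30.2706, 59.9354)], crs=4326)
building = gpd.GeoDataFrame(
    geometry=[Polygon([(30.2709, 59.9353), (30.2712, 59.9353), (30.2712, 59.9355), (30.2709, 59.9355)])],
    crs=4326,
)

noise_zones = simulate_noise(
    source_points=source,
    obstacles=building,
    source_noise_db=90,           # assumed level, roughly heavy traffic at the source
    geometric_mean_freq_hz=2000,  # within the recommended 63-8000 Hz range
    target_noise_db=40,           # default threshold; quieter areas are discarded
    reflection_n=2,
    use_parallel=False,           # threads instead of a process pool
)
print(noise_zones[["noise_level", "geometry"]].head())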
objectnat/methods/noise/noise_simulation_simplified.py
@@ -0,0 +1,222 @@
+# simplified version
+import geopandas as gpd
+import pandas as pd
+from shapely.ops import polygonize, unary_union
+from tqdm.auto import tqdm
+
+from objectnat.methods.noise.noise_reduce import dist_to_target_db
+from objectnat.methods.utils.geom_utils import (
+    distribute_points_on_linestrings,
+    distribute_points_on_polygons,
+    polygons_to_multilinestring,
+)
+from objectnat.methods.visibility.visibility_analysis import get_visibility_accurate
+
+MAX_DB_VALUE = 194
+
+
+def calculate_simplified_noise_frame(
+    noise_sources: gpd.GeoDataFrame, obstacles: gpd.GeoDataFrame, air_temperature, **kwargs
+) -> gpd.GeoDataFrame:
+    """
+    Calculates a simplified environmental noise frame using static noise source geometries without simulating
+    full sound wave propagation or reflections.
+
+    This function provides a fast approximation of noise dispersion from a variety of source geometries, including
+    points (e.g., traffic noise measurement points), lines (e.g., roads or railways), and polygons (e.g., industrial
+    zones or buildings). Instead of simulating detailed wave interactions and reflections, it constructs an
+    envelope of potential noise exposure by buffering the source geometry and applying simplified decay formulas
+    based on sound power, frequency and temperature.
+
+    Args:
+        noise_sources (gpd.GeoDataFrame): A GeoDataFrame containing geometries of noise sources (Point, LineString,
+            or Polygon). Each feature must have the following two columns:
+
+            - 'source_noise_db': Initial sound level at the source, in decibels (dB).
+            - 'geometric_mean_freq_hz': Characteristic sound frequency (Hz) used to model distance-based attenuation.
+
+            Values in 'source_noise_db' must not exceed the physical maximum of 194 dB. Missing or NaN values in
+            required fields will raise an error.
+
+        obstacles (gpd.GeoDataFrame): A GeoDataFrame representing physical obstructions in the environment
+            (e.g., buildings, walls, terrain). These are used to build visibility masks that affect where sound can
+            propagate. Geometry will be simplified for performance using a default tolerance of 1 unit.
+
+        air_temperature (float): The ambient air temperature in degrees Celsius. This value influences the
+            attenuation model of sound in the atmosphere. Temperatures significantly outside the typical 0–30°C
+            range may lead to inaccurate results.
+
+    Keyword Args:
+        target_noise_db (float, optional): The minimum sound level threshold (in dB) to be modeled. Any value below
+            this threshold is considered insignificant and will be excluded from the resulting noise frame.
+            Default is 40 dB.
+        db_sim_step (float, optional): The simulation step size (in dB) used to discretize sound levels into
+            spatial layers. Default is 5. Smaller values produce more detailed output but increase computation time.
+        linestring_point_radius (float, optional): The spacing radius (in meters) used when converting LineString
+            geometries into distributed point sources for simulation. Default is 30. Reducing this value improves
+            detail along long lines.
+        polygon_point_radius (float, optional): The point spacing (in meters) for distributing sources within
+            Polygon geometries. Default is 15. Points are sampled across the polygon's surface and perimeter to
+            represent the full sound-emitting area.
+
+    Returns:
+        gpd.GeoDataFrame: A GeoDataFrame representing simplified noise distribution areas. The output geometries
+            are polygons where each polygon is associated with the maximum sound level (in dB) present in that area,
+            as derived from overlapping source zones. The resulting data is dissolved by noise level and returned in
+            the original coordinate reference system (CRS) of the input sources.
+
+    Notes:
+        - The function does not model reflections or complex diffraction effects. It uses straight-line
+          visibility (line-of-sight) and a layered distance-decay approach for rapid estimation.
+        - Obstacles are used for visibility masking only, not as reflectors or absorbers.
+        - Output resolution and accuracy depend heavily on the geometry type and point distribution settings.
+        - Results are useful for quick noise mapping or for generating initial noise envelopes prior to more
+          detailed simulations.
+    """
+    target_noise_db = kwargs.get("target_noise_db", 40)
+    db_sim_step = kwargs.get("db_sim_step", 5)
+    linestring_point_radius = kwargs.get("linestring_point_radius", 30)
+    polygon_point_radius = kwargs.get("polygon_point_radius", 15)
+
+    required_columns = ["source_noise_db", "geometric_mean_freq_hz"]
+    for col in required_columns:
+        if col not in noise_sources.columns:
+            raise ValueError(f"'{col}' column is missing in provided GeoDataFrame")
+        if noise_sources[col].isnull().any():
+            raise ValueError(f"Column '{col}' contains missing (NaN) values")
+    if (noise_sources["source_noise_db"] > MAX_DB_VALUE).any():
+        raise ValueError(
+            f"One or more values in 'source_noise_db' column exceed the physical limit of {MAX_DB_VALUE} dB."
+        )
+    original_crs = noise_sources.crs
+    if len(obstacles) > 0:
+        obstacles = obstacles.copy()
+        obstacles.geometry = obstacles.geometry.simplify(tolerance=1)
+        local_crs = obstacles.estimate_utm_crs()
+        obstacles.to_crs(local_crs, inplace=True)
+        noise_sources.to_crs(local_crs, inplace=True)
+    else:
+        local_crs = noise_sources.estimate_utm_crs()
+        noise_sources.to_crs(local_crs, inplace=True)
+    noise_sources.reset_index(drop=True)
+
+    noise_sources = noise_sources.explode(ignore_index=True)
+    noise_sources["geom_type"] = noise_sources.geom_type
+
+    grouped_sources = noise_sources.groupby(by=["source_noise_db", "geometric_mean_freq_hz", "geom_type"])
+
+    frame_result = []
+    total_tasks = 0
+    with tqdm(total=total_tasks, desc="Simulating noise") as pbar:
+        for (source_db, freq_hz, geom_type), group_gdf in grouped_sources:
+            # calculating layer dist and db values
+            dist_db = [(0, source_db)]
+            cur_db = source_db - db_sim_step
+            max_dist = 0
+            while cur_db > target_noise_db - db_sim_step:
+                if cur_db - db_sim_step < target_noise_db:
+                    cur_db = target_noise_db
+                max_dist = dist_to_target_db(source_db, cur_db, freq_hz, air_temperature)
+                dist_db.append((max_dist, cur_db))
+                cur_db -= db_sim_step
+
+            # increasing max_dist for extra view
+            max_dist = max_dist * 1.2
+
+            if geom_type == "Point":
+                total_tasks += len(group_gdf)
+                pbar.total = total_tasks
+                pbar.refresh()
+                for _, row in group_gdf.iterrows():
+                    point_from = row.geometry
+                    point_buffer = point_from.buffer(max_dist, resolution=16)
+                    local_obstacles = obstacles[obstacles.intersects(point_buffer)]
+                    vis_poly_gdf = get_visibility_accurate(
+                        gpd.GeoDataFrame(geometry=[point_from], crs=local_crs),
+                        obstacles=local_obstacles,
+                        view_distance=max_dist,
+                    )
+                    if len(vis_poly_gdf) > 0:
+                        noise_from_feature = _eval_donuts_gdf(
+                            point_from, dist_db, local_crs, vis_poly_gdf.iloc[0].geometry
+                        )
+                        frame_result.append(noise_from_feature)
+                    pbar.update(1)
+
+            elif geom_type == "LineString":
+                layer_points = distribute_points_on_linestrings(
+                    group_gdf, radius=linestring_point_radius, lloyd_relax_n=1
+                )
+                total_tasks += len(layer_points)
+                pbar.total = total_tasks
+                pbar.refresh()
+                noise_from_feature = _process_lines_or_polygons(
+                    group_gdf, max_dist, obstacles, layer_points, dist_db, local_crs, pbar
+                )
+                frame_result.append(noise_from_feature)
+            elif geom_type == "Polygon":
+                group_gdf.geometry = group_gdf.buffer(0.1, resolution=2)
+                layer_points = distribute_points_on_polygons(
+                    group_gdf, only_exterior=False, radius=polygon_point_radius, lloyd_relax_n=1
+                )
+                total_tasks += len(layer_points)
+                pbar.total = total_tasks
+                pbar.refresh()
+                noise_from_feature = _process_lines_or_polygons(
+                    group_gdf, max_dist, obstacles, layer_points, dist_db, local_crs, pbar
+                )
+                frame_result.append(noise_from_feature)
+            else:
+                pass
+
+    noise_gdf = gpd.GeoDataFrame(pd.concat(frame_result, ignore_index=True), crs=local_crs)
+    polygons = gpd.GeoDataFrame(
+        geometry=list(polygonize(noise_gdf.geometry.apply(polygons_to_multilinestring).union_all())), crs=local_crs
+    )
+    polygons_points = polygons.copy()
+    polygons_points.geometry = polygons.representative_point()
+    sim_result = polygons_points.sjoin(noise_gdf, predicate="within").reset_index()
+    sim_result = sim_result.groupby("index").agg({"noise_level": "max"})
+    sim_result["geometry"] = polygons
+    sim_result = (
+        gpd.GeoDataFrame(sim_result, geometry="geometry", crs=local_crs).dissolve(by="noise_level").reset_index()
+    )
+
+    return sim_result.to_crs(original_crs)
+
+
+def _process_lines_or_polygons(
+    group_gdf, max_dist, obstacles, layer_points, dist_db, local_crs, pbar
+) -> gpd.GeoDataFrame:
+    features_vision_polys = []
+    layer_buffer = group_gdf.buffer(max_dist, resolution=16).union_all()
+    local_obstacles = obstacles[obstacles.intersects(layer_buffer)]
+    for _, row in layer_points.iterrows():
+        point_from = row.geometry
+        vis_poly_gdf = get_visibility_accurate(
+            gpd.GeoDataFrame(geometry=[point_from], crs=local_crs),
+            obstacles=local_obstacles,
+            view_distance=max_dist,
+        )
+        if len(vis_poly_gdf) > 0:
+            features_vision_polys.append(vis_poly_gdf.iloc[0].geometry)
+        pbar.update(1)
+    features_vision_polys = unary_union(features_vision_polys)
+    return _eval_donuts_gdf(group_gdf.union_all(), dist_db, local_crs, features_vision_polys)
+
+
+def _eval_donuts_gdf(initial_geometry, dist_db, local_crs, clip_poly) -> gpd.GeoDataFrame:
+    donuts = []
+    don_values = []
+    to_cut_off = initial_geometry
+    for i in range(len(dist_db[:-1])):
+        cur_buffer = initial_geometry.buffer(dist_db[i + 1][0])
+        donuts.append(cur_buffer.difference(to_cut_off))
+        don_values.append(dist_db[i][1])
+        to_cut_off = cur_buffer
+    noise_from_feature = (
+        gpd.GeoDataFrame(geometry=donuts, data={"noise_level": don_values}, crs=local_crs)
+        .clip(clip_poly, keep_geom_type=True)
+        .explode(ignore_index=True)
+    )
+    return noise_from_feature
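Likewise, the sketch below is not part of the package; it shows one way to call `calculate_simplified_noise_frame` for a line source, following the docstring above. The road geometry, obstacle footprint, and dB/frequency values are illustrative assumptions.

import geopandas as gpd
from shapely.geometry import LineString, Polygon

from objectnat.methods.noise.noise_simulation_simplified import calculate_simplified_noise_frame

# Hypothetical road segment treated as a line noise source, plus one building as an obstacle (WGS84).
road = gpd.GeoDataFrame(
    {"source_noise_db": [75], "geometric_mean_freq_hz": [1000]},
    geometry=[LineString([(30.2700, 59.9350), (30.2730, 59.9360)])],
    crs=4326,
)
building = gpd.GeoDataFrame(
    geometry=[Polygon([(30.2710, 59.9353), (30.2714, 59.9353), (30.2714, 59.9356), (30.2710, 59.9356)])],
    crs=4326,
)

noise_frame = calculate_simplified_noise_frame(
    noise_sources=road,
    obstacles=building,
    air_temperature=20,
    target_noise_db=40,          # default threshold
    db_sim_step=5,               # default dB layer step
    linestring_point_radius=30,  # spacing of points sampled along the line
)
print(noise_frame[["noise_level", "geometry"]])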
objectnat/methods/point_clustering/__init__.py
@@ -0,0 +1 @@
+from .cluster_points_in_polygons import get_clusters_polygon