BERATools-0.2.2-py3-none-any.whl → BERATools-0.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. beratools/__init__.py +8 -3
  2. beratools/core/{algo_footprint_rel.py → algo_canopy_footprint_exp.py} +176 -139
  3. beratools/core/algo_centerline.py +61 -77
  4. beratools/core/algo_common.py +48 -57
  5. beratools/core/algo_cost.py +18 -25
  6. beratools/core/algo_dijkstra.py +37 -45
  7. beratools/core/algo_line_grouping.py +100 -100
  8. beratools/core/algo_merge_lines.py +40 -8
  9. beratools/core/algo_split_with_lines.py +289 -304
  10. beratools/core/algo_vertex_optimization.py +25 -46
  11. beratools/core/canopy_threshold_relative.py +755 -0
  12. beratools/core/constants.py +8 -9
  13. beratools/{tools → core}/line_footprint_functions.py +411 -258
  14. beratools/core/logger.py +18 -2
  15. beratools/core/tool_base.py +17 -75
  16. beratools/gui/assets/BERALogo.ico +0 -0
  17. beratools/gui/assets/BERA_Splash.gif +0 -0
  18. beratools/gui/assets/BERA_WizardImage.png +0 -0
  19. beratools/gui/assets/beratools.json +475 -2171
  20. beratools/gui/bt_data.py +585 -234
  21. beratools/gui/bt_gui_main.py +129 -91
  22. beratools/gui/main.py +4 -7
  23. beratools/gui/tool_widgets.py +530 -354
  24. beratools/tools/__init__.py +0 -7
  25. beratools/tools/{line_footprint_absolute.py → canopy_footprint_absolute.py} +81 -56
  26. beratools/tools/canopy_footprint_exp.py +113 -0
  27. beratools/tools/centerline.py +30 -37
  28. beratools/tools/check_seed_line.py +127 -0
  29. beratools/tools/common.py +65 -586
  30. beratools/tools/{line_footprint_fixed.py → ground_footprint.py} +140 -117
  31. beratools/tools/line_footprint_relative.py +64 -35
  32. beratools/tools/tool_template.py +48 -40
  33. beratools/tools/vertex_optimization.py +20 -34
  34. beratools/utility/env_checks.py +53 -0
  35. beratools/utility/spatial_common.py +210 -0
  36. beratools/utility/tool_args.py +138 -0
  37. beratools-0.2.4.dist-info/METADATA +134 -0
  38. beratools-0.2.4.dist-info/RECORD +50 -0
  39. {beratools-0.2.2.dist-info → beratools-0.2.4.dist-info}/WHEEL +1 -1
  40. beratools-0.2.4.dist-info/entry_points.txt +3 -0
  41. beratools-0.2.4.dist-info/licenses/LICENSE +674 -0
  42. beratools/core/algo_tiler.py +0 -428
  43. beratools/gui/__init__.py +0 -11
  44. beratools/gui/batch_processing_dlg.py +0 -513
  45. beratools/gui/map_window.py +0 -162
  46. beratools/tools/Beratools_r_script.r +0 -1120
  47. beratools/tools/Ht_metrics.py +0 -116
  48. beratools/tools/batch_processing.py +0 -136
  49. beratools/tools/canopy_threshold_relative.py +0 -672
  50. beratools/tools/canopycostraster.py +0 -222
  51. beratools/tools/fl_regen_csf.py +0 -428
  52. beratools/tools/forest_line_attributes.py +0 -408
  53. beratools/tools/line_grouping.py +0 -45
  54. beratools/tools/ln_relative_metrics.py +0 -615
  55. beratools/tools/r_cal_lpi_elai.r +0 -25
  56. beratools/tools/r_generate_pd_focalraster.r +0 -101
  57. beratools/tools/r_interface.py +0 -80
  58. beratools/tools/r_point_density.r +0 -9
  59. beratools/tools/rpy_chm2trees.py +0 -86
  60. beratools/tools/rpy_dsm_chm_by.py +0 -81
  61. beratools/tools/rpy_dtm_by.py +0 -63
  62. beratools/tools/rpy_find_cellsize.py +0 -43
  63. beratools/tools/rpy_gnd_csf.py +0 -74
  64. beratools/tools/rpy_hummock_hollow.py +0 -85
  65. beratools/tools/rpy_hummock_hollow_raster.py +0 -71
  66. beratools/tools/rpy_las_info.py +0 -51
  67. beratools/tools/rpy_laz2las.py +0 -40
  68. beratools/tools/rpy_lpi_elai_lascat.py +0 -466
  69. beratools/tools/rpy_normalized_lidar_by.py +0 -56
  70. beratools/tools/rpy_percent_above_dbh.py +0 -80
  71. beratools/tools/rpy_points2trees.py +0 -88
  72. beratools/tools/rpy_vegcoverage.py +0 -94
  73. beratools/tools/tiler.py +0 -48
  74. beratools/tools/zonal_threshold.py +0 -144
  75. beratools-0.2.2.dist-info/METADATA +0 -108
  76. beratools-0.2.2.dist-info/RECORD +0 -74
  77. beratools-0.2.2.dist-info/entry_points.txt +0 -2
  78. beratools-0.2.2.dist-info/licenses/LICENSE +0 -22
beratools/core/canopy_threshold_relative.py (new file)
@@ -0,0 +1,755 @@
+ import argparse
+ import json
+ import math
+ import os.path
+ import sys
+ import time
+ from multiprocessing.pool import Pool
+ from pathlib import Path
+
+ import geopandas as gpd
+ import numpy as np
+ import pandas as pd
+ import shapely
+
+ from beratools.core.constants import *
+ from beratools.tools.common import *
+ from beratools.utility.spatial_common import *
+ from beratools.core.tool_base import parallel_mode
+
+
+ class OperationCancelledException(Exception):
+     pass
+
+
+ def main_canopy_threshold_relative(
+     in_line: str,
+     in_chm: str,
+     canopy_percentile: int,
+     canopy_thresh_percentage: int,
+     full_step: bool,
+     processes: int,
+     verbose: bool,
+     out_DynCenterline: str = None,  # for Test tool only
+ ) -> str | None:
+ """
36
+ This is a function finding approximate surrounding forest canopy height
37
+ and surrounding forest edge distance from input CHM
38
+ Args:
39
+ in_line: Path like string
40
+ in_chm: Path like string
41
+ canopy_percentile: Percentile as integer range 1-100
42
+ canopy_thresh_percentage: Percentage as integer range 1-100
43
+
44
+ Returns:
45
+ Path like string of the saved centerlines with extra attributes
46
+ """
47
+     file_path, in_file_name = os.path.split(Path(in_line))
+     out_file = os.path.join(Path(file_path), "DynCanTh_" + in_file_name)
+     in_file, layer = decode_file_layer(in_line)
+     out_cl_file, out_layer = decode_file_layer(out_file)
+     line_seg = gpd.GeoDataFrame.from_file(in_file, layer=layer)
+     _, processes = parallel_mode(processes)
+
+     # check coordinate systems between the line and raster features
+     # with rasterio.open(in_chm) as in_raster:
+     if not compare_crs(vector_crs(in_file), raster_crs(in_chm)):
+         print("Line and raster spatial references are not the same, please check.")
+         exit()
+
+     # Check that the canopy percentile is in the 1-100 range; if not, 50 is applied
+     if not 100 >= int(canopy_percentile) > 0:
+         canopy_percentile = 50
+
+     # Check for the dynamic canopy threshold column; create it if missing
+     if "DynCanTh" not in line_seg.columns.array:
+         print("New column created: {}".format("DynCanTh"))
+         line_seg["DynCanTh"] = np.nan
+
+     # Check for the OLnFID column; create it if missing
+     if "OLnFID" not in line_seg.columns.array:
+         print("New column created: {}".format("OLnFID"))
+         line_seg["OLnFID"] = line_seg.index
+
+     # Check for the OLnSEG column; create it if missing
+     if "OLnSEG" not in line_seg.columns.array:
+         print("New column created: {}".format("OLnSEG"))
+         line_seg["OLnSEG"] = 0
+
+     # Check the input lines for multipart geometry
+     line_seg = chk_df_multipart(line_seg, "LineString")[0]
+
+     # Not splitting lines
+     proc_segments = False
+     if proc_segments:
+         line_seg = split_into_segments(line_seg)
+
+     # copy the original line input to another GeoDataFrame and simplify
+     # the line geometry for buffering
+     workln_df_c = gpd.GeoDataFrame.copy(line_seg, deep=True)
+     workln_df_c.geometry = workln_df_c.geometry.simplify(tolerance=0.5, preserve_topology=True)
+
+     print("{}%".format(5))
+     # copy the simplified line input for ring buffering on both sides
+     worklnbuffer_df_l_ring = gpd.GeoDataFrame.copy(workln_df_c, deep=True)
+     worklnbuffer_df_r_ring = gpd.GeoDataFrame.copy(workln_df_c, deep=True)
+
+     print("Create ring buffer for input line to find the forest edge....")
+
+     def multiringbuffer(df: gpd.GeoDataFrame, nrings: int, ringdist: int) -> list:
+         """
+         Buffer the input row's geometry repeatedly, with a ring width of nrings
+         and an outer limit of ringdist, and return a list of non-overlapping
+         ring buffers.
+         """
+         rings = []  # A list to hold the individual buffers
+         for ring in np.arange(0, ringdist, nrings):  # ring offsets: 0, nrings, 2*nrings, ...
+             big_ring = df["geometry"].buffer(
+                 nrings + ring, single_sided=True, cap_style="flat"
+             )  # Create one big buffer
+             small_ring = df["geometry"].buffer(
+                 ring, single_sided=True, cap_style="flat"
+             )  # Create one smaller one
+             the_ring = big_ring.difference(small_ring)  # Difference the big with the small to create a ring
+             # keep only non-missing, non-empty rings with a real area
+             if not (the_ring is None or shapely.is_empty(the_ring) or the_ring.area == 0):
+                 if isinstance(the_ring, (shapely.MultiPolygon, shapely.Polygon)):
+                     rings.append(the_ring)  # Append the ring to the rings list
+                 elif isinstance(the_ring, shapely.GeometryCollection):
+                     for geom in the_ring.geoms:
+                         if not isinstance(geom, shapely.LineString):
+                             rings.append(geom)
+             print(" {}% ".format((ring / ringdist) * 100))
+
+         return rings  # return the list
+
+     # Create a column with the rings as a list
+     print("Create rings buffer to forest edge on one side....")
+     worklnbuffer_df_l_ring["mgeometry"] = worklnbuffer_df_l_ring.apply(
+         lambda x: multiringbuffer(df=x, nrings=1, ringdist=15), axis=1
+     )
+
+     # Explode to create a row for each ring
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.explode("mgeometry")
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.set_geometry("mgeometry")
+     worklnbuffer_df_l_ring = (
+         worklnbuffer_df_l_ring.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(workln_df_c.crs)
+     )
+     worklnbuffer_df_l_ring["iRing"] = worklnbuffer_df_l_ring.groupby(["OLnFID", "OLnSEG"]).cumcount()
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.reset_index(drop=True)
+
+     print("Create rings buffer to forest edge on the other side....")
+     worklnbuffer_df_r_ring["mgeometry"] = worklnbuffer_df_r_ring.apply(
+         lambda x: multiringbuffer(df=x, nrings=-1, ringdist=-15), axis=1
+     )
+
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.explode("mgeometry")  # Explode to create a row for each ring
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.set_geometry("mgeometry")
+     worklnbuffer_df_r_ring = (
+         worklnbuffer_df_r_ring.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(workln_df_c.crs)
+     )
+     worklnbuffer_df_r_ring["iRing"] = worklnbuffer_df_r_ring.groupby(["OLnFID", "OLnSEG"]).cumcount()
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.reset_index(drop=True)
+
+ print("Create rings buffer.... done.")
166
+ print("{}%".format(20))
167
+
168
+ worklnbuffer_df_r_ring["Percentile_RRing"] = np.nan
169
+ worklnbuffer_df_l_ring["Percentile_LRing"] = np.nan
170
+ line_seg["CL_CutHt"] = np.nan
171
+ line_seg["CR_CutHt"] = np.nan
172
+ line_seg["RDist_Cut"] = np.nan
173
+ line_seg["LDist_Cut"] = np.nan
174
+ print("{}%".format(30))
175
+
176
+     # calculate the height percentile for each ring area using the CHM
+     worklnbuffer_df_l_ring = multiprocessing_Percentile(
+         worklnbuffer_df_l_ring,
+         int(canopy_percentile),
+         int(canopy_thresh_percentage),
+         in_chm,
+         processes,
+         side="LRing",
+     )
+
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_df_l_ring = worklnbuffer_df_l_ring.reset_index(drop=True)
+     print("{}%".format(60))
+     worklnbuffer_df_r_ring = multiprocessing_Percentile(
+         worklnbuffer_df_r_ring,
+         int(canopy_percentile),
+         float(canopy_thresh_percentage),
+         in_chm,
+         processes,
+         side="RRing",
+     )
+
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_df_r_ring = worklnbuffer_df_r_ring.reset_index(drop=True)
+
+     result = multiprocessing_RofC(line_seg, worklnbuffer_df_l_ring, worklnbuffer_df_r_ring, processes)
+     print("{}%".format(80))
+     print("Calculating forest population done.")
+
+     print("Saving percentile information to input line ...")
+     gpd.GeoDataFrame.to_file(result, out_cl_file, layer=out_layer)
+     print("Saving percentile information to input line ... done.")
+
+     if full_step:
+         return out_file  # TODO: make sure this is correct
+
+     print("{}%".format(100))
+
+
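Note: the nested multiringbuffer above builds one-sided concentric "rings" by differencing two single-sided buffers per step. A minimal, standalone sketch of the same idea (editorial, not part of the package; assumes shapely >= 2.0):

from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
ring_width, max_dist = 1, 5
rings = []
for start in range(0, max_dist, ring_width):
    outer = line.buffer(start + ring_width, single_sided=True, cap_style="flat")
    inner = line.buffer(start, single_sided=True, cap_style="flat")
    band = outer.difference(inner)  # the band between the two offsets
    if not band.is_empty:
        rings.append(band)
print(len(rings), [round(b.area, 1) for b in rings])  # 5 bands of ~10.0 each

Each ring isolates the CHM cells at a known distance from the line, which is what lets the tool walk outward until canopy height jumps at the forest edge.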
+ def rate_of_change(in_arg):  # , max_chmht):
+     x = in_arg[0]
+     Olnfid = in_arg[1]
+     Olnseg = in_arg[2]
+     side = in_arg[3]
+     df = in_arg[4]
+     index = in_arg[5]
+
+     # Since the x interval is 1 unit, the array 'diff' is the rate of change (slope)
+     diff = np.ediff1d(x)
+     cut_dist = len(x) / 5
+
+     median_percentile = np.nanmedian(x)
+     if not np.isnan(median_percentile):
+         cut_percentile = math.floor(median_percentile)
+     else:
+         cut_percentile = 0.5
+     found = False
+     changes = 1.50
+     Change = np.insert(diff, 0, 0)
+     scale_down = 1
+
+     # Test whether the rate of change exceeds 150% (1.5); if no result is
+     # found, lower the threshold step by step to 140% (1.4) and so on,
+     # down to 110% (1.1)
+     try:
+         while not found and changes >= 1.1:
+             for ii in range(0, len(Change) - 1):
+                 if x[ii] >= 0.5:
+                     if (Change[ii]) >= changes:
+                         cut_dist = (ii + 1) * scale_down
+                         cut_percentile = math.floor(x[ii])
+                         # median_diff = (cut_percentile - median_percentile)
+                         if 0.5 >= cut_percentile:
+                             if cut_dist > 5:
+                                 cut_percentile = 2
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @<0.5 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 0.5 < cut_percentile <= 5.0:
+                             if cut_dist > 6:
+                                 cut_dist = cut_dist * scale_down**3  # 4.0
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @0.5-5.0 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 5.0 < cut_percentile <= 10.0:
+                             if cut_dist > 8:  # 5
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @5-10 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 10.0 < cut_percentile <= 15:
+                             if cut_dist > 5:
+                                 cut_dist = cut_dist * scale_down**3  # 5.5
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @10-15 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 15 < cut_percentile:
+                             if cut_dist > 4:
+                                 cut_dist = cut_dist * scale_down**2
+                                 cut_percentile = 15.5
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @>15 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         found = True
+                         print(
+                             "{}: OLnFID:{}, OLnSEG: {} rate of change found".format(side, Olnfid, Olnseg),
+                             flush=True,
+                         )
+                         break
+             changes = changes - 0.1
+
+     except IndexError:
+         pass
+
+     # If still no result is found once the threshold reaches 110% (1.1),
+     # the defaults below are used
+     if not found:
+         if 0.5 >= median_percentile:
+             cut_dist = 4 * scale_down  # 3
+             cut_percentile = 0.5
+         elif 0.5 < median_percentile <= 5.0:
+             cut_dist = 4.5 * scale_down  # 4.0
+             cut_percentile = math.floor(median_percentile)
+         elif 5.0 < median_percentile <= 10.0:
+             cut_dist = 5.5 * scale_down  # 5
+             cut_percentile = math.floor(median_percentile)
+         elif 10.0 < median_percentile <= 15:
+             cut_dist = 6 * scale_down  # 5.5
+             cut_percentile = math.floor(median_percentile)
+         elif 15 < median_percentile:
+             cut_dist = 5 * scale_down  # 5
+             cut_percentile = 15.5
+         print(
+             "{}: OLnFID:{}, OLnSEG: {} Estimated".format(side, Olnfid, Olnseg),
+             flush=True,
+         )
+     if side == "Right":
+         df["RDist_Cut"] = cut_dist
+         df["CR_CutHt"] = cut_percentile
+     elif side == "Left":
+         df["LDist_Cut"] = cut_dist
+         df["CL_CutHt"] = cut_percentile
+
+     return df
+
+
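Note: rate_of_change above scans the ring-percentile series for the first jump that exceeds a threshold, relaxing the threshold from 1.5 down to 1.1 before falling back to median-based defaults. A compressed, editorial sketch of that search with made-up numbers:

import numpy as np

heights = np.array([0.4, 0.6, 0.7, 3.1, 9.8, 10.2])  # percentile height per 1 m ring
jumps = np.insert(np.ediff1d(heights), 0, 0)         # slope between adjacent rings

edge_ring, threshold = None, 1.5
while edge_ring is None and threshold >= 1.1:
    hits = np.flatnonzero(jumps >= threshold)
    if hits.size:
        edge_ring = int(hits[0])  # first ring where canopy height jumps
    else:
        threshold -= 0.1          # relax and retry
print(edge_ring)  # -> 3, i.e. the forest edge sits about 3 rings out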
+ def multiprocessing_RofC(line_seg, worklnbuffer_dfLRing, worklnbuffer_dfRRing, processes):
+     in_argsL = []
+     in_argsR = []
+
+     for index in line_seg.index:
+         Olnfid = int(line_seg.OLnFID.iloc[index])
+         Olnseg = int(line_seg.OLnSEG.iloc[index])
+         sql_dfL = worklnbuffer_dfLRing.loc[
+             (worklnbuffer_dfLRing["OLnFID"] == Olnfid) & (worklnbuffer_dfLRing["OLnSEG"] == Olnseg)
+         ].sort_values(by=["iRing"])
+         PLRing = list(sql_dfL["Percentile_LRing"])
+         sql_dfR = worklnbuffer_dfRRing.loc[
+             (worklnbuffer_dfRRing["OLnFID"] == Olnfid) & (worklnbuffer_dfRRing["OLnSEG"] == Olnseg)
+         ].sort_values(by=["iRing"])
+         PRRing = list(sql_dfR["Percentile_RRing"])
+         in_argsL.append([PLRing, Olnfid, Olnseg, "Left", line_seg.loc[index], index])
+         in_argsR.append([PRRing, Olnfid, Olnseg, "Right", line_seg.loc[index], index])
+         print(' "PROGRESS_LABEL Preparing grouped buffer areas...." ', flush=True)
+         print(" {}% ".format(((index + 1) / len(line_seg)) * 100))
+
+     total_steps = len(in_argsL) + len(in_argsR)
+     featuresL = []
+     featuresR = []
+
+     if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+         with Pool(processes=int(processes)) as pool:
+             step = 0
+             # execute tasks in order, process results out of order
+             try:
+                 for resultL in pool.imap_unordered(rate_of_change, in_argsL):
+                     if BT_DEBUGGING:
+                         print("Got result: {}".format(resultL), flush=True)
+                     featuresL.append(resultL)
+                     step += 1
+                     print(
+                         ' "PROGRESS_LABEL Calculate Rate of Change In Buffer Area {} of {}" '.format(
+                             step, total_steps
+                         ),
+                         flush=True,
+                     )
+                     print("{}%".format(step / total_steps * 100), flush=True)
+             except Exception as e:
+                 print(e)
+                 raise
+
+         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
+         with Pool(processes=int(processes)) as pool:
+             step = 0
+             try:
+                 for resultR in pool.imap_unordered(rate_of_change, in_argsR):
+                     if BT_DEBUGGING:
+                         print("Got result: {}".format(resultR), flush=True)
+                     featuresR.append(resultR)
+                     step += 1
+                     print(
+                         ' "PROGRESS_LABEL Calculate Rate of Change Area {} of {}" '.format(
+                             step + len(in_argsL), total_steps
+                         ),
+                         flush=True,
+                     )
+                     print(
+                         "{}%".format((step + len(in_argsL)) / total_steps * 100),
+                         flush=True,
+                     )
+             except Exception as e:
+                 print(e)
+                 raise
+         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
+     else:
+         for rowL in in_argsL:
+             featuresL.append(rate_of_change(rowL))
+
+         for rowR in in_argsR:
+             featuresR.append(rate_of_change(rowR))
+
+         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
+         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
+
+     line_seg = line_seg.set_index(["OLnFID", "OLnSEG"], drop=True)
+     # normalize geometries, then drop duplicate rows via their WKT representation
+     gpdL["geometry"] = gpdL["geometry"].normalize()
+     gpdL["wkt"] = gpdL["geometry"].apply(lambda x: x.wkt)
+     deduplicated_gpdL = gpdL.drop_duplicates(subset=["wkt"], keep="first")
+     deduplicated_gpdL = deduplicated_gpdL.drop(columns=["wkt"]).reset_index(drop=True)
+     gpdR["geometry"] = gpdR["geometry"].normalize()
+     gpdR["wkt"] = gpdR["geometry"].apply(lambda x: x.wkt)
+     deduplicated_gpdR = gpdR.drop_duplicates(subset=["wkt"], keep="first")
+     deduplicated_gpdR = deduplicated_gpdR.drop(columns=["wkt"]).reset_index(drop=True)
+
+     mapping_left_ldist_cut = deduplicated_gpdL.set_index(["OLnFID", "OLnSEG"])["LDist_Cut"]
+     mapping_left_rdist_cut = deduplicated_gpdR.set_index(["OLnFID", "OLnSEG"])["RDist_Cut"]
+     mapping_left_lcl_cut = deduplicated_gpdL.set_index(["OLnFID", "OLnSEG"])["CL_CutHt"]
+     mapping_left_rcl_cut = deduplicated_gpdR.set_index(["OLnFID", "OLnSEG"])["CR_CutHt"]
+
+     condition_ldist_cut = line_seg.index.isin(mapping_left_ldist_cut.index)
+     condition_rdist_cut = line_seg.index.isin(mapping_left_rdist_cut.index)
+     condition_lcl_cut = line_seg.index.isin(mapping_left_lcl_cut.index)
+     condition_rcl_cut = line_seg.index.isin(mapping_left_rcl_cut.index)
+
+     line_seg.loc[condition_ldist_cut, "LDist_Cut"] = line_seg.index.map(mapping_left_ldist_cut)
+     line_seg.loc[condition_rdist_cut, "RDist_Cut"] = line_seg.index.map(mapping_left_rdist_cut)
+     line_seg.loc[condition_lcl_cut, "CL_CutHt"] = line_seg.index.map(mapping_left_lcl_cut)
+     line_seg.loc[condition_rcl_cut, "CR_CutHt"] = line_seg.index.map(mapping_left_rcl_cut)
+     line_seg["DynCanTh"] = (line_seg["CL_CutHt"] + line_seg["CR_CutHt"]) / 2
+     line_seg = line_seg.reset_index(drop=False)
+
+     return line_seg
+
+
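Note: the deduplication block above uses a common GeoPandas idiom: normalize() puts each geometry into canonical form, so geometrically identical rows serialize to identical WKT and drop_duplicates can key on that column. A small editorial illustration:

import geopandas as gpd
from shapely.geometry import LineString

gdf = gpd.GeoDataFrame(geometry=[
    LineString([(0, 0), (1, 1)]),
    LineString([(1, 1), (0, 0)]),  # same line, opposite direction
])
gdf["geometry"] = gdf["geometry"].normalize()    # canonical vertex order
gdf["wkt"] = gdf["geometry"].apply(lambda g: g.wkt)
print(len(gdf.drop_duplicates(subset=["wkt"])))  # -> 1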
+ def split_line_fc(line):
+     if line:
+         return list(map(shapely.LineString, zip(line.coords[:-1], line.coords[1:])))
+     else:
+         return None
+
+
+ def split_into_segments(df):
+     odf = df
+     crs = odf.crs
+     if "OLnSEG" not in odf.columns.array:
+         df["OLnSEG"] = np.nan
+     df = odf.assign(geometry=odf.apply(lambda x: split_line_fc(x.geometry), axis=1))
+     df = df.explode()
+
+     df["OLnSEG"] = df.groupby("OLnFID").cumcount()
+     gdf = gpd.GeoDataFrame(df, geometry=df.geometry, crs=crs)
+     gdf = gdf.sort_values(by=["OLnFID", "OLnSEG"])
+     gdf = gdf.reset_index(drop=True)
+     return gdf
+
+
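Note: split_line_fc pairs every vertex with its successor, so an n-vertex line becomes n-1 two-point segments. For example (editorial):

from shapely.geometry import LineString

line = LineString([(0, 0), (1, 0), (2, 1)])
segments = list(map(LineString, zip(line.coords[:-1], line.coords[1:])))
print([seg.wkt for seg in segments])
# ['LINESTRING (0 0, 1 0)', 'LINESTRING (1 0, 2 1)']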
+ def multiprocessing_copyparallel_lineLRC(dfL, dfR, dfc, processes, left_dis, right_dist, center_dist):
+     try:
+         line_arg = []
+         total_steps = len(dfL)
+
+         for item in dfL.index:
+             item_list = [dfL, dfR, dfc, left_dis, right_dist, center_dist, item]
+             line_arg.append(item_list)
+
+         featuresL = []
+         featuresR = []
+         result = None
+         step = 0
+
+         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+             with Pool(processes=int(processes)) as pool:
+                 # execute tasks in order, process results out of order
+                 for result in pool.imap_unordered(copyparallel_lineLRC, line_arg):
+                     if BT_DEBUGGING:
+                         print(f"Got result: {result}", flush=True)
+                     if result:
+                         featuresL.append(result[0])  # resultL
+                         featuresR.append(result[1])  # resultR
+                     step += 1
+                     print(f" {step / total_steps * 100}% ")
+
+             return gpd.GeoDataFrame(pd.concat(featuresL)), gpd.GeoDataFrame(
+                 pd.concat(featuresR)
+             )  # , gpd.GeoDataFrame(pd.concat(featuresC))
+         elif PARALLEL_MODE == ParallelMode.SEQUENTIAL:
+             for line in line_arg:
+                 result = copyparallel_lineLRC(line)
+                 if BT_DEBUGGING:
+                     print(f"Got result: {result}", flush=True)
+                 if result:
+                     featuresL.append(result[0])  # resultL
+                     featuresR.append(result[1])  # resultR
+                 step += 1
+                 print(f" {step / total_steps * 100}% ")
+
+             return gpd.GeoDataFrame(pd.concat(featuresL)), gpd.GeoDataFrame(
+                 pd.concat(featuresR)
+             )  # , gpd.GeoDataFrame(pd.concat(featuresC))
+
+     except OperationCancelledException:
+         print("Operation cancelled")
+
+
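Note: all of the multiprocessing_* helpers in this file share one pattern: build a per-row argument list, stream it through Pool.imap_unordered so results come back as workers finish, and print progress per result. A stripped-down editorial sketch (square stands in for the real workers):

from multiprocessing.pool import Pool

def square(n):  # stand-in for rate_of_change / cal_percentileRing / copyparallel_lineLRC
    return n * n

if __name__ == "__main__":
    args = list(range(8))
    results = []
    with Pool(processes=4) as pool:
        for step, result in enumerate(pool.imap_unordered(square, args), start=1):
            results.append(result)  # arrives in completion order, not input order
            print(f"{step / len(args) * 100:.0f}%", flush=True)
    print(sorted(results))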
+ def multiprocessing_Percentile(
+     df: gpd.GeoDataFrame,
+     CanPercentile: int,
+     CanThrPercentage: int,
+     in_CHM: str,
+     processes: int,
+     side: str,
+ ) -> gpd.GeoDataFrame | None:
+     try:
+         line_arg = []
+         total_steps = len(df)
+         cal_percentile = cal_percentileRing
+         which_side = side
+         if side == "LRing":
+             PerCol = "Percentile_LRing"
+             which_side = "left"
+         elif side == "RRing":
+             PerCol = "Percentile_RRing"
+             which_side = "right"
+
+         print("Calculating surrounding ({}) forest population for buffer area ...".format(which_side))
+
+         for item in df.index:
+             item_list = [
+                 df.iloc[[item]],
+                 CanPercentile,
+                 CanThrPercentage,
+                 in_CHM,
+                 item,
+                 PerCol,
+             ]
+             line_arg.append(item_list)
+             print(
+                 ' "PROGRESS_LABEL Preparing lines... {} of {}" '.format(item + 1, len(df)),
+                 flush=True,
+             )
+             print(" {}% ".format(item / len(df) * 100), flush=True)
+
+         features = []
+
+         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+             with Pool(processes=int(processes)) as pool:
+                 step = 0
+                 # execute tasks in order, process results out of order
+                 try:
+                     for result in pool.imap_unordered(cal_percentile, line_arg):
+                         if BT_DEBUGGING:
+                             print("Got result: {}".format(result), flush=True)
+                         features.append(result)
+                         step += 1
+                         print(
+                             ' "PROGRESS_LABEL Calculate Percentile In Buffer Area {} of {}" '.format(
+                                 step, total_steps
+                             ),
+                             flush=True,
+                         )
+                         print("{}%".format(step / total_steps * 100), flush=True)
+                 except Exception as e:
+                     print(e)
+                     raise
+             del line_arg
+
+             return gpd.GeoDataFrame(pd.concat(features))
+         else:
+             verbose = False
+             total_steps = len(line_arg)
+             step = 0
+             for row in line_arg:
+                 features.append(cal_percentile(row))
+                 step += 1
+                 if verbose:
+                     print(
+                         ' "PROGRESS_LABEL Calculate Percentile on line {} of {}" '.format(step, total_steps),
+                         flush=True,
+                     )
+                     print(" {}% ".format(step / total_steps * 100), flush=True)
+             return gpd.GeoDataFrame(pd.concat(features))
+
+     except OperationCancelledException:
+         print("Operation cancelled")
+         return None
+
+
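Note: multiprocessing_Percentile hands each worker a one-row frame via df.iloc[[item]] (double brackets), which keeps the GeoDataFrame type so the results can simply be re-stacked with pd.concat. In miniature (editorial):

import geopandas as gpd
import pandas as pd
from shapely.geometry import Point

gdf = gpd.GeoDataFrame({"id": [1, 2]}, geometry=[Point(0, 0), Point(1, 1)])
rows = [gdf.iloc[[i]] for i in range(len(gdf))]  # one-row GeoDataFrames
print(type(rows[0]).__name__)  # GeoDataFrame, not Series
print(len(gpd.GeoDataFrame(pd.concat(rows))))  # 2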
600
+ def cal_percentileLR(line_arg):
601
+ from shapely import ops
602
+
603
+ try:
604
+ df = line_arg[0]
605
+ CanPercentile = line_arg[1]
606
+ CanThrPercentage = line_arg[2]
607
+ in_CHM = line_arg[3]
608
+ row_index = line_arg[4]
609
+ PerCol = line_arg[5]
610
+ line_buffer = df.loc[row_index, "geometry"]
611
+
612
+ if line_buffer.is_empty or shapely.is_missing(line_buffer):
613
+ return None
614
+ if line_buffer.has_z:
615
+ line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
616
+ except Exception as e:
617
+ print(e)
618
+ print("Assigning variable on index:{} Error: ".format(line_arg) + sys.exc_info())
619
+ exit()
620
+
621
+ # TODO: temporary workaround for exception causing not percentile defined
622
+ percentile = 0
623
+ Dyn_Canopy_Threshold = 0.05
624
+ try:
625
+ with rasterio.open(in_CHM) as raster:
626
+ clipped_raster, out_transform = rasterio.mask.mask(
627
+ raster, [line_buffer], crop=True, nodata=BT_NODATA, filled=True
628
+ )
629
+ clipped_raster = np.squeeze(clipped_raster, axis=0)
630
+
631
+ # mask all -9999 (nodata) value cells
632
+ masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
633
+ filled_raster = np.ma.filled(masked_raster, np.nan)
634
+
635
+ # Calculate the percentile
636
+ # masked_mean = np.ma.mean(masked_raster)
637
+ percentile = np.nanpercentile(filled_raster, CanPercentile) # ,method='hazen')
638
+ median = np.nanmedian(filled_raster)
639
+ if percentile > 0.05: # (percentile+median)>0.0:
640
+ Dyn_Canopy_Threshold = percentile * (CanThrPercentage / 100.0)
641
+ else:
642
+ # print("(percentile)<0.05 @ {}".format(row_index))
643
+ Dyn_Canopy_Threshold = 0.05
644
+
645
+ del clipped_raster, out_transform
646
+ del raster
647
+ # return the generated value
648
+ except Exception as e:
649
+ print(e)
650
+ # print(sys.exc_info())
651
+ percentile = 0
652
+ Dyn_Canopy_Threshold = 0
653
+
654
+ try:
655
+ df.loc[row_index, PerCol] = percentile
656
+ df.loc[row_index, "DynCanTh"] = Dyn_Canopy_Threshold
657
+ return df
658
+ except Exception as e:
659
+ print("Error writing Percentile and Dynamic Canopy into table: " + sys.exc_info())
660
+
661
+
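Note: cal_percentileLR clips the CHM to the ring polygon with rasterio.mask.mask before taking the percentile (rasterio itself arrives via the star import from beratools.tools.common). A minimal standalone version of the clip-and-percentile step (editorial; "chm.tif" and the -9999 nodata value are placeholders):

import numpy as np
import rasterio
import rasterio.mask
from shapely.geometry import box

NODATA = -9999.0  # placeholder nodata value
with rasterio.open("chm.tif") as src:  # placeholder path
    polygon = box(*src.bounds)  # any shapely polygon works as the cutline
    clipped, _ = rasterio.mask.mask(src, [polygon], crop=True, nodata=NODATA, filled=True)
heights = np.where(clipped == NODATA, np.nan, clipped)
print(np.nanpercentile(heights, 50))  # median canopy height inside the polygon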
+ def cal_percentileRing(line_arg):
+     from shapely import ops
+
+     try:
+         df = line_arg[0]
+         CanPercentile = line_arg[1]
+         CanThrPercentage = line_arg[2]
+         in_CHM = line_arg[3]
+         row_index = line_arg[4]
+         PerCol = line_arg[5]
+
+         line_buffer = df.loc[row_index, "geometry"]
+         if line_buffer.is_empty or shapely.is_missing(line_buffer):
+             return None
+         if line_buffer.has_z:
+             line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
+
+     except Exception as e:
+         print(e)
+         print("Assigning variable on index: {} Error: {}".format(line_arg, sys.exc_info()))
+         exit()
+
+     # TODO: temporary workaround so percentile is always defined when an exception occurs
+     if not (isinstance(CanPercentile, int) and 100 > CanPercentile > 0):
+         CanPercentile = 50
+     percentile = 0
+     Dyn_Canopy_Threshold = 0.05
+     try:
+         clipped_raster, out_meta = clip_raster(in_CHM, line_buffer, 0)
+         clipped_raster = np.squeeze(clipped_raster, axis=0)
+
+         # mask all -9999 (nodata) value cells
+         masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
+         filled_raster = np.ma.filled(masked_raster, np.nan)
+
+         percentile = np.nanpercentile(filled_raster, CanPercentile)
+
+         if percentile > 1:
+             Dyn_Canopy_Threshold = percentile * (CanThrPercentage / 100)
+         else:
+             Dyn_Canopy_Threshold = 1
+
+         del clipped_raster, out_meta
+         # return the generated values
+     except Exception as e:
+         print(e)
+         print("Default values are used.")
+
+     finally:
+         df.loc[row_index, PerCol] = percentile
+         df.loc[row_index, "DynCanTh"] = Dyn_Canopy_Threshold
+         return df
+
+
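Note: both percentile workers convert nodata cells to NaN so that np.nanpercentile ignores them; a percentile over the raw array would be dragged down by the -9999 fill value. In miniature (editorial):

import numpy as np

NODATA = -9999.0
cells = np.array([[2.0, 3.0, NODATA], [5.0, NODATA, 7.0]])
masked = np.ma.masked_where(cells == NODATA, cells)  # hide nodata cells
filled = np.ma.filled(masked, np.nan)                # NaN participates in nan-aware stats
print(np.nanpercentile(filled, 50))                  # 4.0; nodata ignored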
+ def copyparallel_lineLRC(line_arg):
+     dfL = line_arg[0]
+     dfR = line_arg[1]
+
+     # Simplify the input center lines
+     geom = dfL.loc[line_arg[6], "geometry"]
+     if not geom:
+         return None
+
+     lineL = dfL.loc[line_arg[6], "geometry"].simplify(tolerance=0.05, preserve_topology=True)
+     lineR = dfR.loc[line_arg[6], "geometry"].simplify(tolerance=0.05, preserve_topology=True)
+     # lineC = dfC.loc[line_arg[6], 'geometry'].simplify(tolerance=0.05, preserve_topology=True)
+     offset_distL = float(line_arg[3])
+     offset_distR = float(line_arg[4])
+
+     # parallel_offset() is the older alternative to the offset_curve() method;
+     # it uses resolution instead of quad_segs and a side keyword ('left' or
+     # 'right') instead of the sign of the distance. It is kept for backwards
+     # compatibility, but offset_curve() is recommended instead.
+     # (ref: https://shapely.readthedocs.io/en/stable/manual.html#object.offset_curve)
+     parallel_lineL = lineL.parallel_offset(
+         distance=offset_distL, side="left", join_style=shapely.BufferJoinStyle.mitre
+     )
+
+     parallel_lineR = lineR.parallel_offset(
+         distance=-offset_distR, side="right", join_style=shapely.BufferJoinStyle.mitre
+     )
+
+     if not parallel_lineL.is_empty:
+         dfL.loc[line_arg[6], "geometry"] = parallel_lineL
+     if not parallel_lineR.is_empty:
+         dfR.loc[line_arg[6], "geometry"] = parallel_lineR
+
+     return dfL.iloc[[line_arg[6]]], dfR.iloc[[line_arg[6]]]
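Note: as the comment in copyparallel_lineLRC says, shapely now recommends offset_curve() over the deprecated parallel_offset(); the side is chosen by the sign of the distance rather than a keyword. A short editorial comparison:

from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
left = line.offset_curve(2)    # positive distance -> left side
right = line.offset_curve(-2)  # negative distance -> right side
print(left.wkt)   # LINESTRING (0 2, 10 2)
print(right.wkt)  # a parallel line 2 units below (vertex order depends on the GEOS version)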