BERATools 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. beratools/__init__.py +3 -0
  2. beratools/core/__init__.py +0 -0
  3. beratools/core/algo_centerline.py +476 -0
  4. beratools/core/algo_common.py +489 -0
  5. beratools/core/algo_cost.py +185 -0
  6. beratools/core/algo_dijkstra.py +492 -0
  7. beratools/core/algo_footprint_rel.py +693 -0
  8. beratools/core/algo_line_grouping.py +941 -0
  9. beratools/core/algo_merge_lines.py +255 -0
  10. beratools/core/algo_split_with_lines.py +296 -0
  11. beratools/core/algo_vertex_optimization.py +451 -0
  12. beratools/core/constants.py +56 -0
  13. beratools/core/logger.py +92 -0
  14. beratools/core/tool_base.py +126 -0
  15. beratools/gui/__init__.py +11 -0
  16. beratools/gui/assets/BERALogo.png +0 -0
  17. beratools/gui/assets/beratools.json +471 -0
  18. beratools/gui/assets/closed.gif +0 -0
  19. beratools/gui/assets/closed.png +0 -0
  20. beratools/gui/assets/gui.json +8 -0
  21. beratools/gui/assets/open.gif +0 -0
  22. beratools/gui/assets/open.png +0 -0
  23. beratools/gui/assets/tool.gif +0 -0
  24. beratools/gui/assets/tool.png +0 -0
  25. beratools/gui/bt_data.py +485 -0
  26. beratools/gui/bt_gui_main.py +700 -0
  27. beratools/gui/main.py +27 -0
  28. beratools/gui/tool_widgets.py +730 -0
  29. beratools/tools/__init__.py +7 -0
  30. beratools/tools/canopy_threshold_relative.py +769 -0
  31. beratools/tools/centerline.py +127 -0
  32. beratools/tools/check_seed_line.py +48 -0
  33. beratools/tools/common.py +622 -0
  34. beratools/tools/line_footprint_absolute.py +203 -0
  35. beratools/tools/line_footprint_fixed.py +480 -0
  36. beratools/tools/line_footprint_functions.py +884 -0
  37. beratools/tools/line_footprint_relative.py +75 -0
  38. beratools/tools/tool_template.py +72 -0
  39. beratools/tools/vertex_optimization.py +57 -0
  40. beratools-0.1.0.dist-info/METADATA +134 -0
  41. beratools-0.1.0.dist-info/RECORD +44 -0
  42. beratools-0.1.0.dist-info/WHEEL +4 -0
  43. beratools-0.1.0.dist-info/entry_points.txt +2 -0
  44. beratools-0.1.0.dist-info/licenses/LICENSE +22 -0
beratools/tools/canopy_threshold_relative.py
@@ -0,0 +1,769 @@
+ import argparse
+ import json
+ import math
+ import os.path
+ import sys
+ import time
+ from multiprocessing.pool import Pool
+ from pathlib import Path
+
+ import geopandas as gpd
+ import numpy as np
+ import pandas as pd
+ import rasterio
+ import rasterio.mask
+ import shapely
+
+ # The wildcard imports provide shared helpers and flags used below, e.g.
+ # compare_crs, vector_crs, raster_crs, chk_df_multipart, clip_raster,
+ # BT_DEBUGGING, BT_NODATA, PARALLEL_MODE and ParallelMode.
+ from beratools.core.constants import *
+ from beratools.tools.common import *
+
+
+ class OperationCancelledException(Exception):
+     pass
+
+
+ def main_canopy_threshold_relative(
+     callback,
+     in_line,
+     in_chm,
+     off_ln_dist,
+     canopy_percentile,
+     canopy_thresh_percentage,
+     tree_radius,
+     max_line_dist,
+     canopy_avoidance,
+     exponent,
+     full_step,
+     processes,
+     verbose,
+ ):
+     file_path, in_file_name = os.path.split(Path(in_line))
+     out_file = os.path.join(Path(file_path), "DynCanTh_" + in_file_name)
+     line_seg = gpd.GeoDataFrame.from_file(in_line)
+
+     # Check that the line and raster features share a coordinate system.
+     if not compare_crs(vector_crs(in_line), raster_crs(in_chm)):
+         print("Line and raster spatial references do not match, please check.")
+         sys.exit()
+
+     # Check that the canopy percentile is in the (0, 100] range; otherwise 50 is used.
+     if not 0 < int(canopy_percentile) <= 100:
+         canopy_percentile = 50
+
+     # Check for the dynamic canopy threshold column; create it if missing.
+     if "DynCanTh" not in line_seg.columns:
+         if BT_DEBUGGING:
+             print("DynCanTh column not found in input line")
+         print("New column created: DynCanTh")
+         line_seg["DynCanTh"] = np.nan
+
+     # Check for the OLnFID column; create it if missing.
+     if "OLnFID" not in line_seg.columns:
+         if BT_DEBUGGING:
+             print("OLnFID column not found in input line")
+         print("New column created: OLnFID")
+         line_seg["OLnFID"] = line_seg.index
+
+     # Check for the OLnSEG column; create it if missing.
+     if "OLnSEG" not in line_seg.columns:
+         if BT_DEBUGGING:
+             print("OLnSEG column not found in input line")
+         print("New column created: OLnSEG")
+         line_seg["OLnSEG"] = 0
+
+     line_seg = chk_df_multipart(line_seg, "LineString")[0]
+
+     # Optional per-vertex segmentation of the input lines (disabled by default).
+     proc_segments = False
+     if proc_segments:
+         line_seg = split_into_segments(line_seg)
+
+     # Copy the original line input to working GeoDataFrames.
+     workln_dfC = line_seg.copy(deep=True)
+     workln_dfC.geometry = workln_dfC.geometry.simplify(tolerance=0.5, preserve_topology=True)
+
+     print("%{}".format(5))
+
+     worklnbuffer_dfLRing = workln_dfC.copy(deep=True)
+     worklnbuffer_dfRRing = workln_dfC.copy(deep=True)
+
+     print("Create ring buffers for the input lines to find the forest edge ...")
+
+     def multiringbuffer(df, nrings, ringdist):
+         """
+         Buffer the row's geometry in rings of width `nrings`, out to a total
+         distance of `ringdist`, and return a list of non-overlapping ring buffers.
+         """
+
+         rings = []  # a list to hold the individual ring buffers
+         for ring in np.arange(0, ringdist, nrings):  # ring start offsets: 0, nrings, 2*nrings, ...
+             big_ring = df["geometry"].buffer(
+                 nrings + ring, single_sided=True, cap_style="flat"
+             )  # one big buffer
+             small_ring = df["geometry"].buffer(
+                 ring, single_sided=True, cap_style="flat"
+             )  # one smaller buffer
+             the_ring = big_ring.difference(small_ring)  # difference the big with the small to create a ring
+             if (
+                 not shapely.is_missing(the_ring)
+                 and not shapely.is_empty(the_ring)
+                 and the_ring.area > 0
+             ):
+                 if isinstance(the_ring, (shapely.MultiPolygon, shapely.Polygon)):
+                     rings.append(the_ring)  # append the ring to the rings list
+                 elif isinstance(the_ring, shapely.GeometryCollection):
+                     # keep only the polygonal parts of mixed collections
+                     for geom in the_ring.geoms:
+                         if not isinstance(geom, shapely.LineString):
+                             rings.append(geom)
+             print(" %{} ".format((ring / ringdist) * 100))
+
+         return rings
+
+     # Build a column holding the list of rings, then explode to one row per ring.
+     worklnbuffer_dfLRing["mgeometry"] = worklnbuffer_dfLRing.apply(
+         lambda x: multiringbuffer(df=x, nrings=1, ringdist=15), axis=1
+     )
+
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.explode("mgeometry")  # one row per ring
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.set_geometry("mgeometry")
+     worklnbuffer_dfLRing = (
+         worklnbuffer_dfLRing.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(workln_dfC.crs)
+     )
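+     # iRing numbers each ring outward from the line (0 = innermost).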
+     worklnbuffer_dfLRing["iRing"] = worklnbuffer_dfLRing.groupby(["OLnFID", "OLnSEG"]).cumcount()
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.reset_index(drop=True)
+
+     worklnbuffer_dfRRing["mgeometry"] = worklnbuffer_dfRRing.apply(
+         lambda x: multiringbuffer(df=x, nrings=-1, ringdist=-15), axis=1
+     )
+
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.explode("mgeometry")  # one row per ring
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.set_geometry("mgeometry")
+     worklnbuffer_dfRRing = (
+         worklnbuffer_dfRRing.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(workln_dfC.crs)
+     )
+     worklnbuffer_dfRRing["iRing"] = worklnbuffer_dfRRing.groupby(["OLnFID", "OLnSEG"]).cumcount()
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.reset_index(drop=True)
+
+     print("Task done.")
+     print("%{}".format(20))
+
+     worklnbuffer_dfRRing["Percentile_RRing"] = np.nan
+     worklnbuffer_dfLRing["Percentile_LRing"] = np.nan
+     line_seg["CL_CutHt"] = np.nan
+     line_seg["CR_CutHt"] = np.nan
+     line_seg["RDist_Cut"] = np.nan
+     line_seg["LDist_Cut"] = np.nan
+     print("%{}".format(80))
+
+     # Calculate the height percentile for each ring buffer using the CHM.
+     worklnbuffer_dfLRing = multiprocessing_Percentile(
+         worklnbuffer_dfLRing,
+         int(canopy_percentile),
+         float(canopy_thresh_percentage),
+         in_chm,
+         processes,
+         side="LRing",
+     )
+
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_dfLRing = worklnbuffer_dfLRing.reset_index(drop=True)
+
+     worklnbuffer_dfRRing = multiprocessing_Percentile(
+         worklnbuffer_dfRRing,
+         int(canopy_percentile),
+         float(canopy_thresh_percentage),
+         in_chm,
+         processes,
+         side="RRing",
+     )
+
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.sort_values(by=["OLnFID", "OLnSEG", "iRing"])
+     worklnbuffer_dfRRing = worklnbuffer_dfRRing.reset_index(drop=True)
+
+     result = multiprocessing_RofC(line_seg, worklnbuffer_dfLRing, worklnbuffer_dfRRing, processes)
+     print("%{}".format(40))
+     print("Task done.")
+
+     print("Saving percentile information to the input line ...")
+     gpd.GeoDataFrame.to_file(result, out_file)
+     print("Saving percentile information to the input line ... done.")
+
+     if full_step:
+         return out_file
+
+     print("%{}".format(100))
+
+
+ def rate_of_change(in_arg):
+     x = in_arg[0]
+     Olnfid = in_arg[1]
+     Olnseg = in_arg[2]
+     side = in_arg[3]
+     df = in_arg[4]
+     index = in_arg[5]
+
+     # Since the x interval is 1 unit, the array 'diff' is the rate of change (slope).
+     diff = np.ediff1d(x)
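+     # e.g. x = [2.0, 2.1, 3.8, 4.0] -> diff = [0.1, 1.7, 0.2]; a jump of at
+     # least `changes` (1.5, relaxed stepwise to 1.1 below) marks the forest edge.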
+     cut_dist = len(x) / 5
+
+     median_percentile = np.nanmedian(x)
+     if not np.isnan(median_percentile):
+         cut_percentile = math.floor(median_percentile)
+     else:
+         cut_percentile = 0.5
+     found = False
+     changes = 1.50
+     Change = np.insert(diff, 0, 0)
+     scale_down = 1
+
+     # Test whether the rate of change exceeds 150% (1.5); if no result is
+     # found, relax the threshold to 140% (1.4) and so on down to 110% (1.1).
+     try:
+         while not found and changes >= 1.1:
+             for ii in range(0, len(Change) - 1):
+                 if x[ii] >= 0.5:
+                     if Change[ii] >= changes:
+                         cut_dist = (ii + 1) * scale_down
+                         cut_percentile = math.floor(x[ii])
+                         if 0.5 >= cut_percentile:
+                             if cut_dist > 5:
+                                 cut_percentile = 2
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @<0.5 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 0.5 < cut_percentile <= 5.0:
+                             if cut_dist > 6:
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @0.5-5.0 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 5.0 < cut_percentile <= 10.0:
+                             if cut_dist > 8:
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @5-10 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 10.0 < cut_percentile <= 15:
+                             if cut_dist > 5:
+                                 cut_dist = cut_dist * scale_down**3
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @10-15 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         elif 15 < cut_percentile:
+                             if cut_dist > 4:
+                                 cut_dist = cut_dist * scale_down**2
+                                 cut_percentile = 15.5
+                                 print(
+                                     "{}: OLnFID:{}, OLnSEG: {} @>15 found and modified".format(
+                                         side, Olnfid, Olnseg
+                                     ),
+                                     flush=True,
+                                 )
+                         found = True
+                         print(
+                             "{}: OLnFID:{}, OLnSEG: {} rate of change found".format(side, Olnfid, Olnseg),
+                             flush=True,
+                         )
+                         break
+             changes = changes - 0.1
+
+     except IndexError:
+         pass
+
+     # If no change point was found even at 110%, fall back to defaults
+     # chosen from the median percentile.
+     if not found:
+         if 0.5 >= median_percentile:
+             cut_dist = 4 * scale_down
+             cut_percentile = 0.5
+         elif 0.5 < median_percentile <= 5.0:
+             cut_dist = 4.5 * scale_down
+             cut_percentile = math.floor(median_percentile)
+         elif 5.0 < median_percentile <= 10.0:
+             cut_dist = 5.5 * scale_down
+             cut_percentile = math.floor(median_percentile)
+         elif 10.0 < median_percentile <= 15:
+             cut_dist = 6 * scale_down
+             cut_percentile = math.floor(median_percentile)
+         elif 15 < median_percentile:
+             cut_dist = 5 * scale_down
+             cut_percentile = 15.5
+         print(
+             "{}: OLnFID:{}, OLnSEG: {} Estimated".format(side, Olnfid, Olnseg),
+             flush=True,
+         )
+     if side == "Right":
+         df["RDist_Cut"] = cut_dist
+         df["CR_CutHt"] = cut_percentile
+     elif side == "Left":
+         df["LDist_Cut"] = cut_dist
+         df["CL_CutHt"] = cut_percentile
+
+     return df
+
+
+ def multiprocessing_RofC(line_seg, worklnbuffer_dfLRing, worklnbuffer_dfRRing, processes):
+     in_argsL = []
+     in_argsR = []
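+
+     # Each work item is [ring percentiles, OLnFID, OLnSEG, side, line row, index],
+     # matching the unpacking at the top of rate_of_change().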
+     for index in line_seg.index:
+         Olnfid = int(line_seg.OLnFID.iloc[index])
+         Olnseg = int(line_seg.OLnSEG.iloc[index])
+         sql_dfL = worklnbuffer_dfLRing.loc[
+             (worklnbuffer_dfLRing["OLnFID"] == Olnfid) & (worklnbuffer_dfLRing["OLnSEG"] == Olnseg)
+         ].sort_values(by=["iRing"])
+         PLRing = list(sql_dfL["Percentile_LRing"])
+         sql_dfR = worklnbuffer_dfRRing.loc[
+             (worklnbuffer_dfRRing["OLnFID"] == Olnfid) & (worklnbuffer_dfRRing["OLnSEG"] == Olnseg)
+         ].sort_values(by=["iRing"])
+         PRRing = list(sql_dfR["Percentile_RRing"])
+         in_argsL.append([PLRing, Olnfid, Olnseg, "Left", line_seg.loc[index], index])
+         in_argsR.append([PRRing, Olnfid, Olnseg, "Right", line_seg.loc[index], index])
+         print(' "PROGRESS_LABEL Preparing grouped buffer areas...." ', flush=True)
+         print(" %{} ".format((index + 1) / len(line_seg) * 100))
+
+     total_steps = len(in_argsL) + len(in_argsR)
+     featuresL = []
+     featuresR = []
+
+     if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+         with Pool(processes=int(processes)) as pool:
+             step = 0
+             # execute tasks in order, process results out of order
+             try:
+                 for resultL in pool.imap_unordered(rate_of_change, in_argsL):
+                     if BT_DEBUGGING:
+                         print("Got result: {}".format(resultL), flush=True)
+                     featuresL.append(resultL)
+                     step += 1
+                     print(
+                         ' "PROGRESS_LABEL Calculate Rate of Change In Buffer Area {} of {}" '.format(
+                             step, total_steps
+                         ),
+                         flush=True,
+                     )
+                     print("%{}".format(step / total_steps * 100), flush=True)
+             except Exception as e:
+                 print(e)
+                 raise
+
+         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
+         with Pool(processes=int(processes)) as pool:
+             try:
+                 for resultR in pool.imap_unordered(rate_of_change, in_argsR):
+                     if BT_DEBUGGING:
+                         print("Got result: {}".format(resultR), flush=True)
+                     featuresR.append(resultR)
+                     step += 1  # continues the cumulative count from the left side
+                     print(
+                         ' "PROGRESS_LABEL Calculate Rate of Change Area {} of {}" '.format(
+                             step, total_steps
+                         ),
+                         flush=True,
+                     )
+                     print("%{}".format(step / total_steps * 100), flush=True)
+             except Exception as e:
+                 print(e)
+                 raise
+         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
+     else:
+         for rowL in in_argsL:
+             featuresL.append(rate_of_change(rowL))
+
+         for rowR in in_argsR:
+             featuresR.append(rate_of_change(rowR))
+
+         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
+         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
+
+     for index in line_seg.index:
+         lnfid = line_seg.OLnFID.iloc[index]
+         Olnseg = line_seg.OLnSEG.iloc[index]
+         line_seg.loc[index, "RDist_Cut"] = float(
+             gpdR.loc[(gpdR.OLnFID == lnfid) & (gpdR.OLnSEG == Olnseg), "RDist_Cut"].iloc[0]
+         )
+         line_seg.loc[index, "LDist_Cut"] = float(
+             gpdL.loc[(gpdL.OLnFID == lnfid) & (gpdL.OLnSEG == Olnseg), "LDist_Cut"].iloc[0]
+         )
+         line_seg.loc[index, "CL_CutHt"] = float(
+             gpdL.loc[(gpdL.OLnFID == lnfid) & (gpdL.OLnSEG == Olnseg), "CL_CutHt"].iloc[0]
+         )
+         line_seg.loc[index, "CR_CutHt"] = float(
+             gpdR.loc[(gpdR.OLnFID == lnfid) & (gpdR.OLnSEG == Olnseg), "CR_CutHt"].iloc[0]
+         )
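+         # Final dynamic canopy threshold: mean of the left and right cut heights.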
+         line_seg.loc[index, "DynCanTh"] = (
+             line_seg.loc[index, "CL_CutHt"] + line_seg.loc[index, "CR_CutHt"]
+         ) / 2
+         print(
+             ' "PROGRESS_LABEL Recording ... {} of {}" '.format(index + 1, len(line_seg)),
+             flush=True,
+         )
+         print(" %{} ".format((index + 1) / len(line_seg) * 100), flush=True)
+
+     return line_seg
+
+
+ def split_line_fc(line):
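+     # Split a LineString into its two-point segments, e.g.
+     # (0,0)-(1,0)-(2,0) -> [(0,0)-(1,0), (1,0)-(2,0)].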
+     if line:
+         return list(map(shapely.LineString, zip(line.coords[:-1], line.coords[1:])))
+     else:
+         return None
+
+
+ def split_into_segments(df):
+     odf = df
+     crs = odf.crs
+     if "OLnSEG" not in odf.columns:
+         df["OLnSEG"] = np.nan
+     df = odf.assign(geometry=odf.apply(lambda x: split_line_fc(x.geometry), axis=1))
+     df = df.explode()
+
+     df["OLnSEG"] = df.groupby("OLnFID").cumcount()
+     gdf = gpd.GeoDataFrame(df, geometry=df.geometry, crs=crs)
+     gdf = gdf.sort_values(by=["OLnFID", "OLnSEG"])
+     gdf = gdf.reset_index(drop=True)
+     return gdf
+
+
+ def multiprocessing_copyparallel_lineLRC(dfL, dfR, dfc, processes, left_dis, right_dist, center_dist):
+     try:
+         line_arg = []
+         total_steps = len(dfL)
+
+         for item in dfL.index:
+             item_list = [dfL, dfR, dfc, left_dis, right_dist, center_dist, item]
+             line_arg.append(item_list)
+
+         featuresL = []
+         featuresR = []
+         result = None
+         step = 0
+
+         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+             with Pool(processes=int(processes)) as pool:
+                 # execute tasks in order, process results out of order
+                 for result in pool.imap_unordered(copyparallel_lineLRC, line_arg):
+                     if BT_DEBUGGING:
+                         print(f"Got result: {result}", flush=True)
+                     if result:
+                         featuresL.append(result[0])  # resultL
+                         featuresR.append(result[1])  # resultR
+                     step += 1
+                     print(f" %{step / total_steps * 100} ")
+
+             return gpd.GeoDataFrame(pd.concat(featuresL)), gpd.GeoDataFrame(
+                 pd.concat(featuresR)
+             )
+         elif PARALLEL_MODE == ParallelMode.SEQUENTIAL:
+             for line in line_arg:
+                 result = copyparallel_lineLRC(line)
+                 if BT_DEBUGGING:
+                     print(f"Got result: {result}", flush=True)
+                 if result:
+                     featuresL.append(result[0])  # resultL
+                     featuresR.append(result[1])  # resultR
+                 step += 1
+                 print(f" %{step / total_steps * 100} ")
+
+             return gpd.GeoDataFrame(pd.concat(featuresL)), gpd.GeoDataFrame(
+                 pd.concat(featuresR)
+             )
+
+     except OperationCancelledException:
+         print("Operation cancelled")
+
+
+ def multiprocessing_Percentile(df, CanPercentile, CanThrPercentage, in_CHM, processes, side):
+     try:
+         line_arg = []
+         total_steps = len(df)
+         cal_percentile = cal_percentileLR
+         which_side = side
+         if side == "left":
+             PerCol = "Percentile_L"
+             cal_percentile = cal_percentileLR
+         elif side == "right":
+             PerCol = "Percentile_R"
+             cal_percentile = cal_percentileLR
+         elif side == "LRing":
+             PerCol = "Percentile_LRing"
+             cal_percentile = cal_percentileRing
+             which_side = "left"
+         elif side == "RRing":
+             PerCol = "Percentile_RRing"
+             which_side = "right"
+             cal_percentile = cal_percentileRing
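+         # side -> (percentile column, worker):
+         #   "left"/"right"   -> Percentile_L / Percentile_R with cal_percentileLR
+         #   "LRing"/"RRing"  -> Percentile_LRing / Percentile_RRing with cal_percentileRing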
+
+         print("Calculating surrounding ({}) forest population for buffer area ...".format(which_side))
+
+         for item in df.index:
+             item_list = [
+                 df.iloc[[item]],
+                 CanPercentile,
+                 CanThrPercentage,
+                 in_CHM,
+                 item,
+                 PerCol,
+             ]
+             line_arg.append(item_list)
+             print(
+                 ' "PROGRESS_LABEL Preparing... {} of {}" '.format(item + 1, len(df)),
+                 flush=True,
+             )
+             print(" %{} ".format((item + 1) / len(df) * 100), flush=True)
+
+         features = []
+         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
+             with Pool(processes=int(processes)) as pool:
+                 step = 0
+                 # execute tasks in order, process results out of order
+                 try:
+                     for result in pool.imap_unordered(cal_percentile, line_arg):
+                         if BT_DEBUGGING:
+                             print("Got result: {}".format(result), flush=True)
+                         features.append(result)
+                         step += 1
+                         print(
+                             ' "PROGRESS_LABEL Calculate Percentile In Buffer Area {} of {}" '.format(
+                                 step, total_steps
+                             ),
+                             flush=True,
+                         )
+                         print("%{}".format(step / total_steps * 100), flush=True)
+                 except Exception as e:
+                     print(e)
+                     raise
+             del line_arg
+
+             return gpd.GeoDataFrame(pd.concat(features))
+         else:
+             verbose = False
+             total_steps = len(line_arg)
+             step = 0
+             for row in line_arg:
+                 features.append(cal_percentile(row))
+                 step += 1
+                 if verbose:
+                     print(
+                         ' "PROGRESS_LABEL Calculate Percentile on line {} of {}" '.format(step, total_steps),
+                         flush=True,
+                     )
+                     print(" %{} ".format(step / total_steps * 100), flush=True)
+             return gpd.GeoDataFrame(pd.concat(features))
+
+     except OperationCancelledException:
+         print("Operation cancelled")
+
+
+ def cal_percentileLR(line_arg):
+     from shapely import ops
+
+     try:
+         df = line_arg[0]
+         CanPercentile = line_arg[1]
+         CanThrPercentage = line_arg[2]
+         in_CHM = line_arg[3]
+         row_index = line_arg[4]
+         PerCol = line_arg[5]
+         line_buffer = df.loc[row_index, "geometry"]
+
+         if line_buffer.is_empty or shapely.is_missing(line_buffer):
+             return None
+         if line_buffer.has_z:
+             # drop Z values so the 2D geometry can be used as a raster mask
+             line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
+     except Exception as e:
+         print(e)
+         print("Assigning variable on index: {} Error: {}".format(line_arg, sys.exc_info()))
+         sys.exit()
+
+     # TODO: temporary workaround; these defaults are used when no percentile
+     # can be computed.
+     percentile = 0
+     Dyn_Canopy_Threshold = 0.05
+     try:
+         with rasterio.open(in_CHM) as raster:
+             clipped_raster, out_transform = rasterio.mask.mask(
+                 raster, [line_buffer], crop=True, nodata=BT_NODATA, filled=True
+             )
+             clipped_raster = np.squeeze(clipped_raster, axis=0)
+
+             # mask all -9999 (nodata) value cells
+             masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
+             filled_raster = np.ma.filled(masked_raster, np.nan)
+
+             # Calculate the percentile
+             percentile = np.nanpercentile(filled_raster, CanPercentile)
+             median = np.nanmedian(filled_raster)
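+
+             # Dynamic threshold: the CanPercentile-th height percentile scaled
+             # by CanThrPercentage, floored at 0.05 for effectively open ground.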
+             if percentile > 0.05:
+                 Dyn_Canopy_Threshold = percentile * (CanThrPercentage / 100.0)
+             else:
+                 Dyn_Canopy_Threshold = 0.05
+
+             del clipped_raster, out_transform
+         del raster
+     except Exception as e:
+         print(e)
+         percentile = 0
+         Dyn_Canopy_Threshold = 0
+
+     try:
+         df.loc[row_index, PerCol] = percentile
+         df.loc[row_index, "DynCanTh"] = Dyn_Canopy_Threshold
+         return df
+     except Exception as e:
+         print("Error writing Percentile and Dynamic Canopy into table: {}".format(e))
+
+
+ def cal_percentileRing(line_arg):
+     from shapely import ops
+
+     try:
+         df = line_arg[0]
+         CanPercentile = line_arg[1]
+         CanThrPercentage = line_arg[2]
+         in_CHM = line_arg[3]
+         row_index = line_arg[4]
+         PerCol = line_arg[5]
+
+         line_buffer = df.loc[row_index, "geometry"]
+         if line_buffer.is_empty or shapely.is_missing(line_buffer):
+             return None
+         if line_buffer.has_z:
+             line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
+
+     except Exception as e:
+         print(e)
+         print("Assigning variable on index: {} Error: {}".format(line_arg, sys.exc_info()))
+         sys.exit()
+
+     # TODO: temporary workaround; these defaults are used when no percentile
+     # can be computed.
+     percentile = 0.5
+     Dyn_Canopy_Threshold = 0.05
+     try:
+         clipped_raster, out_meta = clip_raster(in_CHM, line_buffer, 0)
+         clipped_raster = np.squeeze(clipped_raster, axis=0)
+
+         # mask all -9999 (nodata) value cells
+         masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
+         filled_raster = np.ma.filled(masked_raster, np.nan)
+
+         # Calculate the median height and derive the dynamic canopy threshold.
+         percentile = np.nanpercentile(filled_raster, 50)
+
+         if percentile > 1:
+             Dyn_Canopy_Threshold = percentile * 0.3
+         else:
+             Dyn_Canopy_Threshold = 1
+
+         del clipped_raster, out_meta
+     except Exception as e:
+         print(e)
+         print("Default values are used.")
+
+     finally:
+         df.loc[row_index, PerCol] = percentile
+         df.loc[row_index, "DynCanTh"] = Dyn_Canopy_Threshold
+         return df
+
+
+ def copyparallel_lineLRC(line_arg):
+     dfL = line_arg[0]
+     dfR = line_arg[1]
+
+     # Simplify input center lines
+     geom = dfL.loc[line_arg[6], "geometry"]
+     if not geom:
+         return None
+
+     lineL = dfL.loc[line_arg[6], "geometry"].simplify(tolerance=0.05, preserve_topology=True)
+     lineR = dfR.loc[line_arg[6], "geometry"].simplify(tolerance=0.05, preserve_topology=True)
+     offset_distL = float(line_arg[3])
+     offset_distR = float(line_arg[4])
+
+     # parallel_offset() is the older alternative to offset_curve(): it uses
+     # resolution instead of quad_segs and a side keyword ('left' or 'right')
+     # instead of the sign of the distance. It is kept for backwards
+     # compatibility, but offset_curve() is recommended.
+     # (ref: https://shapely.readthedocs.io/en/stable/manual.html#object.offset_curve)
+     parallel_lineL = lineL.parallel_offset(
+         distance=offset_distL, side="left", join_style=shapely.BufferJoinStyle.mitre
+     )
+
+     parallel_lineR = lineR.parallel_offset(
+         distance=-offset_distR, side="right", join_style=shapely.BufferJoinStyle.mitre
+     )
+
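+     # Keep the original geometry wherever the offset result is empty.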
+     if not parallel_lineL.is_empty:
+         dfL.loc[line_arg[6], "geometry"] = parallel_lineL
+     if not parallel_lineR.is_empty:
+         dfR.loc[line_arg[6], "geometry"] = parallel_lineR
+
+     return dfL.iloc[[line_arg[6]]], dfR.iloc[[line_arg[6]]]
+
+
+ if __name__ == "__main__":
+     start_time = time.time()
+     print(
+         "Starting Dynamic Canopy Threshold calculation processing\n @ {}".format(
+             time.strftime("%d %b %Y %H:%M:%S", time.localtime())
+         )
+     )
+
+     parser = argparse.ArgumentParser()
+     parser.add_argument("-i", "--input", type=json.loads)
+     parser.add_argument("-p", "--processes")
+     parser.add_argument("-v", "--verbose")
+     args = parser.parse_args()
+     args.input["full_step"] = False
+
+     verbose = args.verbose == "True"
+     main_canopy_threshold_relative(print, **args.input, processes=int(args.processes), verbose=verbose)
+
+     print("%{}".format(100))
+     print(
+         "Finishing Dynamic Canopy Threshold calculation @ {}\n(in {} seconds)".format(
+             time.strftime("%d %b %Y %H:%M:%S", time.localtime()),
+             round(time.time() - start_time, 5),
+         )
+     )