BERATools 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. beratools/__init__.py +8 -3
  2. beratools/core/{algo_footprint_rel.py → algo_canopy_footprint_exp.py} +176 -139
  3. beratools/core/algo_centerline.py +61 -77
  4. beratools/core/algo_common.py +48 -57
  5. beratools/core/algo_cost.py +18 -25
  6. beratools/core/algo_dijkstra.py +37 -45
  7. beratools/core/algo_line_grouping.py +100 -100
  8. beratools/core/algo_merge_lines.py +40 -8
  9. beratools/core/algo_split_with_lines.py +289 -304
  10. beratools/core/algo_vertex_optimization.py +25 -46
  11. beratools/core/canopy_threshold_relative.py +755 -0
  12. beratools/core/constants.py +8 -9
  13. beratools/{tools → core}/line_footprint_functions.py +411 -258
  14. beratools/core/logger.py +18 -2
  15. beratools/core/tool_base.py +17 -75
  16. beratools/gui/assets/BERALogo.ico +0 -0
  17. beratools/gui/assets/BERA_Splash.gif +0 -0
  18. beratools/gui/assets/BERA_WizardImage.png +0 -0
  19. beratools/gui/assets/beratools.json +475 -2171
  20. beratools/gui/bt_data.py +585 -234
  21. beratools/gui/bt_gui_main.py +129 -91
  22. beratools/gui/main.py +4 -7
  23. beratools/gui/tool_widgets.py +530 -354
  24. beratools/tools/__init__.py +0 -7
  25. beratools/tools/{line_footprint_absolute.py → canopy_footprint_absolute.py} +81 -56
  26. beratools/tools/canopy_footprint_exp.py +113 -0
  27. beratools/tools/centerline.py +30 -37
  28. beratools/tools/check_seed_line.py +127 -0
  29. beratools/tools/common.py +65 -586
  30. beratools/tools/{line_footprint_fixed.py → ground_footprint.py} +140 -117
  31. beratools/tools/line_footprint_relative.py +64 -35
  32. beratools/tools/tool_template.py +48 -40
  33. beratools/tools/vertex_optimization.py +20 -34
  34. beratools/utility/env_checks.py +53 -0
  35. beratools/utility/spatial_common.py +210 -0
  36. beratools/utility/tool_args.py +138 -0
  37. beratools-0.2.5.dist-info/METADATA +134 -0
  38. beratools-0.2.5.dist-info/RECORD +50 -0
  39. {beratools-0.2.3.dist-info → beratools-0.2.5.dist-info}/WHEEL +1 -1
  40. beratools-0.2.5.dist-info/entry_points.txt +3 -0
  41. beratools-0.2.5.dist-info/licenses/LICENSE +674 -0
  42. beratools/core/algo_tiler.py +0 -428
  43. beratools/gui/__init__.py +0 -11
  44. beratools/gui/batch_processing_dlg.py +0 -513
  45. beratools/gui/map_window.py +0 -162
  46. beratools/tools/Beratools_r_script.r +0 -1120
  47. beratools/tools/Ht_metrics.py +0 -116
  48. beratools/tools/batch_processing.py +0 -136
  49. beratools/tools/canopy_threshold_relative.py +0 -672
  50. beratools/tools/canopycostraster.py +0 -222
  51. beratools/tools/fl_regen_csf.py +0 -428
  52. beratools/tools/forest_line_attributes.py +0 -408
  53. beratools/tools/line_grouping.py +0 -45
  54. beratools/tools/ln_relative_metrics.py +0 -615
  55. beratools/tools/r_cal_lpi_elai.r +0 -25
  56. beratools/tools/r_generate_pd_focalraster.r +0 -101
  57. beratools/tools/r_interface.py +0 -80
  58. beratools/tools/r_point_density.r +0 -9
  59. beratools/tools/rpy_chm2trees.py +0 -86
  60. beratools/tools/rpy_dsm_chm_by.py +0 -81
  61. beratools/tools/rpy_dtm_by.py +0 -63
  62. beratools/tools/rpy_find_cellsize.py +0 -43
  63. beratools/tools/rpy_gnd_csf.py +0 -74
  64. beratools/tools/rpy_hummock_hollow.py +0 -85
  65. beratools/tools/rpy_hummock_hollow_raster.py +0 -71
  66. beratools/tools/rpy_las_info.py +0 -51
  67. beratools/tools/rpy_laz2las.py +0 -40
  68. beratools/tools/rpy_lpi_elai_lascat.py +0 -466
  69. beratools/tools/rpy_normalized_lidar_by.py +0 -56
  70. beratools/tools/rpy_percent_above_dbh.py +0 -80
  71. beratools/tools/rpy_points2trees.py +0 -88
  72. beratools/tools/rpy_vegcoverage.py +0 -94
  73. beratools/tools/tiler.py +0 -48
  74. beratools/tools/zonal_threshold.py +0 -144
  75. beratools-0.2.3.dist-info/METADATA +0 -108
  76. beratools-0.2.3.dist-info/RECORD +0 -74
  77. beratools-0.2.3.dist-info/entry_points.txt +0 -2
  78. beratools-0.2.3.dist-info/licenses/LICENSE +0 -22
beratools/tools/canopy_threshold_relative.py (deleted)
@@ -1,672 +0,0 @@
- import os.path
- from multiprocessing.pool import Pool
- import geopandas as gpd
- import json
- import argparse
- import time
- import pandas as pd
- import numpy as np
- import shapely
- from beratools.core.constants import *
- from beratools.tools.common import *
- import sys
- import math
-
-
- class OperationCancelledException(Exception):
-     pass
-
-
- def main_canopy_threshold_relative(callback, in_line, in_chm, off_ln_dist, canopy_percentile,
-                                    canopy_thresh_percentage, tree_radius, max_line_dist, canopy_avoidance,
-                                    exponent, full_step, processes, verbose):
-     file_path, in_file_name = os.path.split(in_line)
-     out_file = os.path.join(file_path, 'DynCanTh_' + in_file_name)
-     line_seg = gpd.GeoDataFrame.from_file(in_line)
-
-     # check coordinate systems between line and raster features
-     # with rasterio.open(in_chm) as in_raster:
-     if compare_crs(vector_crs(in_line), raster_crs(in_chm)):
-         pass
-     else:
-         print("Line and raster spatial references are not same, please check.")
-         exit()
-
-     # Check the canopy threshold percent in 0-100 range. If it is not, 50% will be applied
-     if not 100 >= int(canopy_percentile) > 0:
-         canopy_percentile = 50
-
-     # Check the Dynamic Canopy threshold column in data. If it is not, new column will be created
-     if 'DynCanTh' not in line_seg.columns.array:
-         if BT_DEBUGGING:
-             print("{} column not found in input line".format('DynCanTh'))
-             print("New column created: {}".format('DynCanTh'))
-         line_seg['DynCanTh'] = np.nan
-
-     # Check the OLnFID column in data. If it is not, column will be created
-     if 'OLnFID' not in line_seg.columns.array:
-         if BT_DEBUGGING:
-             print("{} column not found in input line".format('OLnFID'))
-
-         print("New column created: {}".format('OLnFID'))
-         line_seg['OLnFID'] = line_seg.index
-
-     # Check the OLnSEG column in data. If it is not, column will be created
-     if 'OLnSEG' not in line_seg.columns.array:
-         if BT_DEBUGGING:
-             print("{} column not found in input line".format('OLnSEG'))
-
-         print("New column created: {}".format('OLnSEG'))
-         line_seg['OLnSEG'] = 0
-
-     line_seg = chk_df_multipart(line_seg, 'LineString')[0]
-
-     proc_segments = False
-     if proc_segments:
-         line_seg = split_into_segments(line_seg)
-     else:
-         pass
-
-     # copy original line input to another GeoDataframe
-     workln_dfC = gpd.GeoDataFrame.copy((line_seg))
-     workln_dfC.geometry = workln_dfC.geometry.simplify(tolerance=0.5, preserve_topology=True)
-
-     print('%{}'.format(5))
-
-     worklnbuffer_dfLRing = gpd.GeoDataFrame.copy((workln_dfC))
-     worklnbuffer_dfRRing = gpd.GeoDataFrame.copy((workln_dfC))
-
-     print('Create ring buffer for input line to find the forest edge....')
-
-     def multiringbuffer(df, nrings, ringdist):
-         """
-         Buffers an input dataframes geometry nring (number of rings) times, with a distance between
-         rings of ringdist and returns a list of non overlapping buffers
-         """
-
-         rings = []  # A list to hold the individual buffers
-         for ring in np.arange(0, ringdist, nrings):  # For each ring (1, 2, 3, ..., nrings)
-             big_ring = df["geometry"].buffer(nrings + ring, single_sided=True,
-                                              cap_style='flat')  # Create one big buffer
-             small_ring = df["geometry"].buffer(ring, single_sided=True, cap_style='flat')  # Create one smaller one
-             the_ring = big_ring.difference(small_ring)  # Difference the big with the small to create a ring
-             if (~shapely.is_empty(the_ring) or ~shapely.is_missing(the_ring) or not None or ~the_ring.area == 0):
-                 if isinstance(the_ring, shapely.MultiPolygon) or isinstance(the_ring, shapely.Polygon):
-                     rings.append(the_ring)  # Append the ring to the rings list
-                 else:
-                     if isinstance(the_ring, shapely.GeometryCollection):
-                         for i in range(0, len(the_ring.geoms)):
-                             if not isinstance(the_ring.geoms[i], shapely.LineString):
-                                 rings.append(the_ring.geoms[i])
-             print(' %{} '.format((ring / ringdist) * 100))
-
-         return rings  # return the list
-
-     # Create a column with the rings as a list
-
-     worklnbuffer_dfLRing['mgeometry'] = worklnbuffer_dfLRing.apply(lambda x: multiringbuffer(df=x, nrings=1,
-                                                                                             ringdist=15), axis=1)
-
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.explode("mgeometry")  # Explode to create a row for each ring
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.set_geometry("mgeometry")
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(
-         workln_dfC.crs)
-     worklnbuffer_dfLRing['iRing'] = worklnbuffer_dfLRing.groupby(['OLnFID', 'OLnSEG']).cumcount()
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.sort_values(by=['OLnFID', 'OLnSEG', 'iRing'])
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.reset_index(drop=True)
-
-     worklnbuffer_dfRRing['mgeometry'] = worklnbuffer_dfRRing.apply(
-         lambda x: multiringbuffer(df=x, nrings=-1, ringdist=-15), axis=1)
-
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.explode("mgeometry")  # Explode to create a row for each ring
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.set_geometry("mgeometry")
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.drop(columns=["geometry"]).rename_geometry("geometry").set_crs(
-         workln_dfC.crs)
-     worklnbuffer_dfRRing['iRing'] = worklnbuffer_dfRRing.groupby(['OLnFID', 'OLnSEG']).cumcount()
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.sort_values(by=['OLnFID', 'OLnSEG', 'iRing'])
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.reset_index(drop=True)
-
-     print("Task done.")
-     print('%{}'.format(20))
-
-     worklnbuffer_dfRRing['Percentile_RRing'] = np.nan
-     worklnbuffer_dfLRing['Percentile_LRing'] = np.nan
-     line_seg['CL_CutHt'] = np.nan
-     line_seg['CR_CutHt'] = np.nan
-     line_seg['RDist_Cut'] = np.nan
-     line_seg['LDist_Cut'] = np.nan
-     print('%{}'.format(80))
-
-     # calculate the Height percentile for each parallel area using CHM
-     worklnbuffer_dfLRing = multiprocessing_Percentile(worklnbuffer_dfLRing, int(canopy_percentile),
-                                                       float(canopy_thresh_percentage), in_chm,
-                                                       processes, side='LRing')
-
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.sort_values(by=['OLnFID', 'OLnSEG', 'iRing'])
-     worklnbuffer_dfLRing = worklnbuffer_dfLRing.reset_index(drop=True)
-
-     worklnbuffer_dfRRing = multiprocessing_Percentile(worklnbuffer_dfRRing, int(canopy_percentile),
-                                                       float(canopy_thresh_percentage), in_chm,
-                                                       processes, side='RRing')
-
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.sort_values(by=['OLnFID', 'OLnSEG', 'iRing'])
-     worklnbuffer_dfRRing = worklnbuffer_dfRRing.reset_index(drop=True)
-
-     result = multiprocessing_RofC(line_seg, worklnbuffer_dfLRing, worklnbuffer_dfRRing, processes)
-     print('%{}'.format(40))
-     print("Task done.")
-
-     print("Saving percentile information to input line ...")
-     gpd.GeoDataFrame.to_file(result, out_file)
-     print("Task done.")
-
-     if full_step:
-         return out_file
-
-     print('%{}'.format(100))
-
-
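The nested `multiringbuffer` above builds each side's ring set by differencing successively larger single-sided, flat-capped buffers, so every ring covers only the band between two offsets; negative `nrings`/`ringdist` values, as passed for the right-hand side, flip the buffers to the other side of the line. A minimal standalone sketch of that idea (plain Shapely 2.x, hypothetical helper and widths, not the package's API):

```python
import numpy as np
from shapely.geometry import LineString

def ring_buffers(geom, ring_width=1.0, total_dist=15.0):
    """Non-overlapping single-sided rings: buffer(d + w) minus buffer(d)."""
    rings = []
    for start in np.arange(0, total_dist, ring_width):
        big = geom.buffer(start + ring_width, single_sided=True, cap_style='flat')
        small = geom.buffer(start, single_sided=True, cap_style='flat')
        ring = big.difference(small)  # keep only the band between the two offsets
        if not ring.is_empty and ring.area > 0:
            rings.append(ring)
    return rings

line = LineString([(0, 0), (10, 0)])
print(len(ring_buffers(line)))  # 15 one-unit rings on the left side of the line
```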
- def rate_of_change(in_arg):  # ,max_chmht):
-     x = in_arg[0]
-     Olnfid = in_arg[1]
-     Olnseg = in_arg[2]
-     side = in_arg[3]
-     df = in_arg[4]
-     index = in_arg[5]
-
-     # Since the x interval is 1 unit, the array 'diff' is the rate of change (slope)
-     diff = np.ediff1d(x)
-     cut_dist = len(x) / 5
-
-     median_percentile = np.nanmedian(x)
-     if not np.isnan(median_percentile):
-         cut_percentile = math.floor(median_percentile)
-     else:
-         cut_percentile = 0.5
-     found = False
-     changes = 1.50
-     Change = np.insert(diff, 0, 0)
-     scale_down = 1
-
-     # test the rate of change is > than 150% (1.5), if it is
-     # no result found then lower to 140% (1.4) until 110% (1.1)
-     try:
-         while not found and changes >= 1.1:
-             for ii in range(0, len(Change) - 1):
-                 if x[ii] >= 0.5:
-                     if (Change[ii]) >= changes:
-                         cut_dist = (ii + 1) * scale_down
-                         cut_percentile = math.floor(x[ii])
-                         # median_diff=(cut_percentile-median_percentile)
-                         if 0.5 >= cut_percentile:
-                             if cut_dist > 5:
-                                 cut_percentile = 2
-                                 cut_dist = cut_dist * scale_down ** 3
-                                 print("{}: OLnFID:{}, OLnSEG: {} @<0.5 found and modified".format(side,
-                                                                                                   Olnfid,
-                                                                                                   Olnseg), flush=True)
-                         elif 0.5 < cut_percentile <= 5.0:
-                             if cut_dist > 6:
-                                 cut_dist = cut_dist * scale_down ** 3  # 4.0
-                                 print("{}: OLnFID:{}, OLnSEG: {} @0.5-5.0 found and modified".format(side,
-                                                                                                      Olnfid,
-                                                                                                      Olnseg),
-                                       flush=True)
-                         elif 5.0 < cut_percentile <= 10.0:
-                             if cut_dist > 8:  # 5
-                                 cut_dist = cut_dist * scale_down ** 3
-                                 print("{}: OLnFID:{}, OLnSEG: {} @5-10 found and modified".format(side,
-                                                                                                   Olnfid,
-                                                                                                   Olnseg), flush=True)
-                         elif 10.0 < cut_percentile <= 15:
-                             if cut_dist > 5:
-                                 cut_dist = cut_dist * scale_down ** 3  # 5.5
-                                 print("{}: OLnFID:{}, OLnSEG: {} @10-15 found and modified".format(side,
-                                                                                                    Olnfid,
-                                                                                                    Olnseg), flush=True)
-                         elif 15 < cut_percentile:
-                             if cut_dist > 4:
-                                 cut_dist = cut_dist * scale_down ** 2
-                                 cut_percentile = 15.5
-                                 print("{}: OLnFID:{}, OLnSEG: {} @>15 found and modified".format(side,
-                                                                                                  Olnfid,
-                                                                                                  Olnseg), flush=True)
-                         found = True
-                         print("{}: OLnFID:{}, OLnSEG: {} rate of change found".format(side, Olnfid, Olnseg), flush=True)
-                         break
-             changes = changes - 0.1
-
-     except IndexError:
-         pass
-
-     # if still is no result found, lower to 10% (1.1), if no result found then default is used
-     if not found:
-
-         if 0.5 >= median_percentile:
-             cut_dist = 4 * scale_down  # 3
-             cut_percentile = 0.5
-         elif 0.5 < median_percentile <= 5.0:
-             cut_dist = 4.5 * scale_down  # 4.0
-             cut_percentile = math.floor(median_percentile)
-         elif 5.0 < median_percentile <= 10.0:
-             cut_dist = 5.5 * scale_down  # 5
-             cut_percentile = math.floor(median_percentile)
-         elif 10.0 < median_percentile <= 15:
-             cut_dist = 6 * scale_down  # 5.5
-             cut_percentile = math.floor(median_percentile)
-         elif 15 < median_percentile:
-             cut_dist = 5 * scale_down  # 5
-             cut_percentile = 15.5
-         print("{}: OLnFID:{}, OLnSEG: {} Estimated".format(side, Olnfid, Olnseg), flush=True)
-     if side == 'Right':
-         df['RDist_Cut'] = cut_dist
-         df['CR_CutHt'] = cut_percentile
-     elif side == 'Left':
-         df['LDist_Cut'] = cut_dist
-         df['CL_CutHt'] = cut_percentile
-
-     return df
-
-
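`rate_of_change` reduces to a threshold scan over the first differences of the ring percentiles: find the first ring where canopy height jumps sharply (the assumed forest edge), relaxing the required jump from 1.5 down to 1.1 when nothing qualifies. The search loop in isolation (illustrative thresholds and data only):

```python
import numpy as np

def first_big_jump(percentiles, start=1.5, stop=1.1, step=0.1):
    """Return (index, value) of the first rise >= threshold, relaxing the
    threshold until a hit is found; None if no jump ever qualifies."""
    # change[i] is percentiles[i] - percentiles[i-1], with 0 padded in front
    change = np.insert(np.ediff1d(percentiles), 0, 0)
    threshold = start
    while threshold >= stop:
        for i, (val, delta) in enumerate(zip(percentiles, change)):
            if val >= 0.5 and delta >= threshold:
                return i, float(val)
        threshold -= step
    return None

heights = np.array([0.3, 0.6, 0.7, 2.5, 2.6, 2.7])  # e.g. per-ring percentiles
print(first_big_jump(heights))  # (3, 2.5): the +1.8 jump at ring 3
```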
- def multiprocessing_RofC(line_seg, worklnbuffer_dfLRing, worklnbuffer_dfRRing, processes):
-     in_argsL = []
-     in_argsR = []
-
-     for index in (line_seg.index):
-         resultsL = []
-         resultsR = []
-         Olnfid = int(line_seg.OLnFID.iloc[index])
-         Olnseg = int(line_seg.OLnSEG.iloc[index])
-         sql_dfL = worklnbuffer_dfLRing.loc[
-             (worklnbuffer_dfLRing['OLnFID'] == Olnfid) & (worklnbuffer_dfLRing['OLnSEG'] == Olnseg)].sort_values(
-             by=['iRing'])
-         PLRing = list(sql_dfL['Percentile_LRing'])
-         sql_dfR = worklnbuffer_dfRRing.loc[
-             (worklnbuffer_dfRRing['OLnFID'] == Olnfid) & (worklnbuffer_dfRRing['OLnSEG'] == Olnseg)].sort_values(
-             by=['iRing'])
-         PRRing = list(sql_dfR['Percentile_RRing'])
-         in_argsL.append([PLRing, Olnfid, Olnseg, 'Left', line_seg.loc[index], index])
-         in_argsR.append([PRRing, Olnfid, Olnseg, 'Right', line_seg.loc[index], index])
-         print(' "PROGRESS_LABEL Preparing grouped buffer areas...." ', flush=True)
-         print(' %{} '.format((index + 1 / len(line_seg)) * 100))
-
-     total_steps = len(in_argsL) + len(in_argsR)
-     featuresL = []
-     featuresR = []
-
-     if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
-         with Pool(processes=int(processes)) as pool:
-
-             step = 0
-             # execute tasks in order, process results out of order
-             try:
-                 for resultL in pool.imap_unordered(rate_of_change, in_argsL):
-                     if BT_DEBUGGING:
-                         print('Got result: {}'.format(resultL), flush=True)
-                     featuresL.append(resultL)
-                     step += 1
-                     print(
-                         ' "PROGRESS_LABEL Calculate Rate of Change In Buffer Area {} of {}" '.format(step, total_steps),
-                         flush=True)
-                     print('%{}'.format(step / total_steps * 100), flush=True)
-             except Exception:
-                 print(Exception)
-                 raise
-
-         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
-         with Pool(processes=int(processes)) as pool:
-             try:
-                 for resultR in pool.imap_unordered(rate_of_change, in_argsR):
-                     if BT_DEBUGGING:
-                         print('Got result: {}'.format(resultR), flush=True)
-                     featuresR.append(resultR)
-                     step += 1
-                     print(
-                         ' "PROGRESS_LABEL Calculate Rate of Change Area {} of {}" '.format(step + len(in_argsL),
-                                                                                            total_steps),
-                         flush=True)
-                     print('%{}'.format((step + len(in_argsL)) / total_steps * 100), flush=True)
-             except Exception:
-                 print(Exception)
-                 raise
-         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
-     else:
-         for rowL in in_argsL:
-             featuresL.append(rate_of_change(rowL))
-
-         for rowR in in_argsR:
-             featuresR.append(rate_of_change(rowR))
-
-         gpdL = gpd.GeoDataFrame(pd.concat(featuresL, axis=1).T)
-         gpdR = gpd.GeoDataFrame(pd.concat(featuresR, axis=1).T)
-
-     for index in line_seg.index:
-         lnfid = line_seg.OLnFID.iloc[index]
-         Olnseg = line_seg.OLnSEG.iloc[index]
-         line_seg.loc[index, 'RDist_Cut'] = float(
-             gpdR.loc[(gpdR.OLnFID == lnfid) & (gpdR.OLnSEG == Olnseg)]['RDist_Cut'])
-         line_seg.loc[index, 'LDist_Cut'] = float(
-             gpdL.loc[(gpdL.OLnFID == lnfid) & (gpdL.OLnSEG == Olnseg)]['LDist_Cut'])
-         line_seg.loc[index, 'CL_CutHt'] = float(gpdL.loc[(gpdL.OLnFID == lnfid) & (gpdL.OLnSEG == Olnseg)]['CL_CutHt'])
-         line_seg.loc[index, 'CR_CutHt'] = float(gpdR.loc[(gpdR.OLnFID == lnfid) & (gpdR.OLnSEG == Olnseg)]['CR_CutHt'])
-         line_seg.loc[index, 'DynCanTh'] = (line_seg.loc[index, 'CL_CutHt'] + line_seg.loc[index, 'CR_CutHt']) / 2
-         print(' "PROGRESS_LABEL Recording ... {} of {}" '.format(index + 1, len(line_seg)), flush=True)
-         print(' %{} '.format(index + 1 / len(line_seg) * 100), flush=True)
-
-     return line_seg
-
-
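`multiprocessing_RofC` and the other dispatchers below share one fan-out pattern: build a list of per-row argument lists, stream them through `Pool.imap_unordered` so finished rows come back immediately for progress reporting, then concatenate the returned rows. A generic sketch of that pattern (stand-in worker, not the package's function):

```python
from multiprocessing.pool import Pool

def worker(args):
    # stand-in for rate_of_change: any picklable row-level computation
    row_id, value = args
    return row_id, value * 2

if __name__ == '__main__':
    tasks = [(i, float(i)) for i in range(100)]
    results = []
    with Pool(processes=4) as pool:
        # imap_unordered yields each result as soon as its task completes,
        # which is what lets the caller print incremental progress
        for step, result in enumerate(pool.imap_unordered(worker, tasks), 1):
            results.append(result)
            print(f'%{step / len(tasks) * 100:.0f}', flush=True)
    print(len(results))  # 100 results, in completion order
```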
- def split_line_fc(line):
-     if line:
-         return list(map(shapely.LineString, zip(line.coords[:-1], line.coords[1:])))
-     else:
-         return None
-
-
- def split_into_segments(df):
-     odf = df
-     crs = odf.crs
-     if 'OLnSEG' not in odf.columns.array:
-         df['OLnSEG'] = np.nan
-     else:
-         pass
-     df = odf.assign(geometry=odf.apply(lambda x: split_line_fc(x.geometry), axis=1))
-     df = df.explode()
-
-     df['OLnSEG'] = df.groupby('OLnFID').cumcount()
-     gdf = gpd.GeoDataFrame(df, geometry=df.geometry, crs=crs)
-     gdf = gdf.sort_values(by=['OLnFID', 'OLnSEG'])
-     gdf = gdf.reset_index(drop=True)
-     return gdf
-
-
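`split_line_fc` pairs consecutive vertices to turn one LineString into its two-point segments, and `split_into_segments` explodes those lists into one numbered row per segment. The pairing trick on its own:

```python
from shapely.geometry import LineString

def to_segments(line):
    """Pair consecutive vertices: a 4-vertex line yields 3 segments."""
    return [LineString(pair) for pair in zip(line.coords[:-1], line.coords[1:])]

line = LineString([(0, 0), (1, 0), (2, 1), (3, 1)])
for seg_id, seg in enumerate(to_segments(line)):
    print(seg_id, seg.wkt)
# 0 LINESTRING (0 0, 1 0)
# 1 LINESTRING (1 0, 2 1)
# 2 LINESTRING (2 1, 3 1)
```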
- def multiprocessing_copyparallel_lineLRC(dfL, dfR, dfc, processes, left_dis, right_dist, center_dist):
-     try:
-         line_arg = []
-         total_steps = len(dfL)
-
-         for item in dfL.index:
-             item_list = [dfL, dfR, dfc, left_dis, right_dist, center_dist, item]
-             line_arg.append(item_list)
-
-         featuresL = []
-         featuresR = []
-         result = None
-         step = 0
-
-         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
-             with Pool(processes=int(processes)) as pool:
-                 # execute tasks in order, process results out of order
-                 for result in pool.imap_unordered(copyparallel_lineLRC, line_arg):
-                     if BT_DEBUGGING:
-                         print(f'Got result: {result}', flush=True)
-                     if result:
-                         featuresL.append(result[0])  # resultL
-                         featuresR.append(result[1])  # resultR
-                     step += 1
-                     print(f' %{step / total_steps * 100} ')
-
-             return gpd.GeoDataFrame(pd.concat(featuresL)), \
-                 gpd.GeoDataFrame(pd.concat(featuresR))  # , gpd.GeoDataFrame(pd.concat(featuresC))
-         elif PARALLEL_MODE == ParallelMode.SEQUENTIAL:
-             for line in line_arg:
-                 result = copyparallel_lineLRC(line)
-                 if BT_DEBUGGING:
-                     print(f'Got result: {result}', flush=True)
-                 if result:
-                     featuresL.append(result[0])  # resultL
-                     featuresR.append(result[1])  # resultR
-                 step += 1
-                 print(f' %{step / total_steps * 100} ')
-
-             return gpd.GeoDataFrame(pd.concat(featuresL)), \
-                 gpd.GeoDataFrame(pd.concat(featuresR))  # , gpd.GeoDataFrame(pd.concat(featuresC))
-
-     except OperationCancelledException:
-         print("Operation cancelled")
-
-
- def multiprocessing_Percentile(df, CanPercentile, CanThrPercentage, in_CHM, processes, side):
-     try:
-         line_arg = []
-         total_steps = len(df)
-         cal_percentile = cal_percentileLR
-         if side == 'left':
-             PerCol = 'Percentile_L'
-             which_side = 'left'
-             cal_percentile = cal_percentileLR
-         elif side == 'right':
-             PerCol = 'Percentile_R'
-             which_side = 'right'
-             cal_percentile = cal_percentileLR
-         elif side == 'LRing':
-             PerCol = 'Percentile_LRing'
-             cal_percentile = cal_percentileRing
-             which_side = 'left'
-         elif side == 'RRing':
-             PerCol = 'Percentile_RRing'
-             which_side = 'right'
-             cal_percentile = cal_percentileRing
-
-         print("Calculating surrounding ({}) forest population for buffer area ...".format(which_side))
-
-         for item in df.index:
-             item_list = [df.iloc[[item]], CanPercentile, CanThrPercentage, in_CHM, item, PerCol]
-             line_arg.append(item_list)
-             print(' "PROGRESS_LABEL Preparing... {} of {}" '.format(item + 1, len(df)), flush=True)
-             print(' %{} '.format(item / len(df) * 100), flush=True)
-
-         features = []
-         # chunksize = math.ceil(total_steps / processes)
-         # PARALLEL_MODE=False
-         if PARALLEL_MODE == ParallelMode.MULTIPROCESSING:
-             with Pool(processes=int(processes)) as pool:
-
-                 step = 0
-                 # execute tasks in order, process results out of order
-                 try:
-                     for result in pool.imap_unordered(cal_percentile, line_arg):
-                         if BT_DEBUGGING:
-                             print('Got result: {}'.format(result), flush=True)
-                         features.append(result)
-                         step += 1
-                         print(
-                             ' "PROGRESS_LABEL Calculate Percentile In Buffer Area {} of {}" '.format(step, total_steps),
-                             flush=True)
-                         print('%{}'.format(step / total_steps * 100), flush=True)
-                 except Exception:
-                     print(Exception)
-                     raise
-             del line_arg
-
-             return gpd.GeoDataFrame(pd.concat(features))
-         else:
-             verbose = False
-             total_steps = len(line_arg)
-             step = 0
-             for row in line_arg:
-                 features.append(cal_percentile(row))
-                 step += 1
-                 if verbose:
-                     print(' "PROGRESS_LABEL Calculate Percentile on line {} of {}" '.format(step, total_steps),
-                           flush=True)
-                     print(' %{} '.format(step / total_steps * 100), flush=True)
-             return gpd.GeoDataFrame(pd.concat(features))
-
-     except OperationCancelledException:
-         print("Operation cancelled")
-
-
- def cal_percentileLR(line_arg):
-     from shapely import ops
-     try:
-         df = line_arg[0]
-         CanPercentile = line_arg[1]
-         CanThrPercentage = line_arg[2]
-         in_CHM = line_arg[3]
-         row_index = line_arg[4]
-         PerCol = line_arg[5]
-         line_buffer = df.loc[row_index, 'geometry']
-
-         if line_buffer.is_empty or shapely.is_missing(line_buffer):
-             return None
-         if line_buffer.has_z:
-             line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
-     except Exception as e:
-         print(e)
-         print("Assigning variable on index:{} Error: ".format(line_arg) + sys.exc_info())
-         exit()
-
-     # TODO: temporary workaround for exception causing not percentile defined
-     percentile = 0
-     Dyn_Canopy_Threshold = 0.05
-     try:
-         with rasterio.open(in_CHM) as raster:
-             clipped_raster, out_transform = rasterio.mask.mask(raster, [line_buffer], crop=True,
-                                                                nodata=BT_NODATA, filled=True)
-             clipped_raster = np.squeeze(clipped_raster, axis=0)
-
-             # mask all -9999 (nodata) value cells
-             masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
-             filled_raster = np.ma.filled(masked_raster, np.nan)
-
-             # Calculate the percentile
-             # masked_mean = np.ma.mean(masked_raster)
-             percentile = np.nanpercentile(filled_raster, CanPercentile)  # ,method='hazen')
-             median = np.nanmedian(filled_raster)
-             if percentile > 0.05:  # (percentile+median)>0.0:
-                 Dyn_Canopy_Threshold = percentile * (CanThrPercentage / 100.0)
-             else:
-                 # print("(percentile)<0.05 @ {}".format(row_index))
-                 Dyn_Canopy_Threshold = 0.05
-
-             del clipped_raster, out_transform
-         del raster
-         # return the generated value
-     except Exception as e:
-         print(e)
-         # print(sys.exc_info())
-         percentile = 0
-         Dyn_Canopy_Threshold = 0
-
-     try:
-         df.loc[row_index, PerCol] = percentile
-         df.loc[row_index, 'DynCanTh'] = Dyn_Canopy_Threshold
-         return df
-     except Exception as e:
-         print("Error writing Percentile and Dynamic Canopy into table: " + sys.exc_info())
-
-
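The core of `cal_percentileLR` is a clip-and-percentile step: mask the CHM to the buffer polygon, replace nodata with NaN, and take the requested height percentile of what remains. A self-contained sketch of that step (hypothetical file name and nodata value; a single-band CHM is assumed):

```python
import numpy as np
import rasterio
import rasterio.mask

def buffer_percentile(chm_path, polygon, percentile=50, nodata=-9999):
    """Clip a CHM raster to one polygon and return the height percentile."""
    with rasterio.open(chm_path) as src:
        clipped, _ = rasterio.mask.mask(src, [polygon], crop=True,
                                        nodata=nodata, filled=True)
    band = np.squeeze(clipped, axis=0)              # drop the band axis
    heights = np.where(band == nodata, np.nan, band)
    return np.nanpercentile(heights, percentile)

# usage (assumed inputs), mirroring the ring variant's 0.3 scaling:
# threshold = 0.3 * buffer_percentile('chm.tif', ring_polygon)
```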
- def cal_percentileRing(line_arg):
-     from shapely import ops
-     try:
-         df = line_arg[0]
-         CanPercentile = line_arg[1]
-         CanThrPercentage = line_arg[2]
-         in_CHM = line_arg[3]
-         row_index = line_arg[4]
-         PerCol = line_arg[5]
-
-         line_buffer = df.loc[row_index, 'geometry']
-         if line_buffer.is_empty or shapely.is_missing(line_buffer):
-             return None
-         if line_buffer.has_z:
-             line_buffer = ops.transform(lambda x, y, z=None: (x, y), line_buffer)
-
-
-     except Exception as e:
-         print(e)
-         print("Assigning variable on index:{} Error: ".format(line_arg) + sys.exc_info())
-         exit()
-
-     # TODO: temporary workaround for exception causing not percentile defined
-     percentile = 0.5
-     Dyn_Canopy_Threshold = 0.05
-     try:
-
-         #with rasterio.open(in_CHM) as raster:
-         #    clipped_raster, out_transform = rasterio.mask.mask(raster, [line_buffer], crop=True,
-         #                                                       nodata=BT_NODATA, filled=True)
-         clipped_raster, out_meta = clip_raster(in_CHM, line_buffer, 0)
-         clipped_raster = np.squeeze(clipped_raster, axis=0)
-
-         # mask all -9999 (nodata) value cells
-         masked_raster = np.ma.masked_where(clipped_raster == BT_NODATA, clipped_raster)
-         filled_raster = np.ma.filled(masked_raster, np.nan)
-
-         # Calculate the percentile
-         # masked_mean = np.ma.mean(masked_raster)
-         percentile = np.nanpercentile(filled_raster, 50)  # CanPercentile)#,method='hazen')
-
-         if percentile > 1:  # (percentile+median)>0.0:
-             Dyn_Canopy_Threshold = percentile * (0.3)
-         else:
-             Dyn_Canopy_Threshold = 1
-
-         del clipped_raster, out_meta
-         #del raster
-         # return the generated value
-     except Exception as e:
-         print(e)
-         # print('Something wrong in ID:{}'.format(row_index))
-         print("Default values are used.")
-
-
-     finally:
-         df.loc[row_index, PerCol] = percentile
-         df.loc[row_index, 'DynCanTh'] = Dyn_Canopy_Threshold
-         return df
-
-
- def copyparallel_lineLRC(line_arg):
-     dfL = line_arg[0]
-     dfR = line_arg[1]
-
-     # Simplify input center lines
-     geom = dfL.loc[line_arg[6], 'geometry']
-     if not geom:
-         return None
-
-     lineL = dfL.loc[line_arg[6], 'geometry'].simplify(tolerance=0.05, preserve_topology=True)
-     lineR = dfR.loc[line_arg[6], 'geometry'].simplify(tolerance=0.05, preserve_topology=True)
-     # lineC = dfC.loc[line_arg[6], 'geometry'].simplify(tolerance=0.05, preserve_topology=True)
-     offset_distL = float(line_arg[3])
-     offset_distR = float(line_arg[4])
-
-     # Older alternative method to the offset_curve() method,
-     # but uses resolution instead of quad_segs and a side keyword (‘left’ or ‘right’) instead
-     # of sign of the distance. This method is kept for backwards compatibility for now,
-     # but it is recommended to use offset_curve() instead.
-     # (ref: https://shapely.readthedocs.io/en/stable/manual.html#object.offset_curve)
-     parallel_lineL = lineL.parallel_offset(distance=offset_distL, side='left',
-                                            join_style=shapely.BufferJoinStyle.mitre)
-
-     parallel_lineR = lineR.parallel_offset(distance=-offset_distR, side='right',
-                                            join_style=shapely.BufferJoinStyle.mitre)
-
-     if not parallel_lineL.is_empty:
-         dfL.loc[line_arg[6], 'geometry'] = parallel_lineL
-     if not parallel_lineR.is_empty:
-         dfR.loc[line_arg[6], 'geometry'] = parallel_lineR
-
-     return dfL.iloc[[line_arg[6]]], dfR.iloc[[line_arg[6]]]  # ,dfC.iloc[[line_arg[6]]]
-
-
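As the comment in `copyparallel_lineLRC` notes, `parallel_offset` is kept only for backwards compatibility; Shapely recommends `offset_curve`, which takes a signed distance (positive = left) instead of a `side` keyword. Roughly equivalent calls, sketched on a trivial line:

```python
from shapely import BufferJoinStyle
from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])

# legacy API: side keyword, positive distance
left_old = line.parallel_offset(distance=2, side='left',
                                join_style=BufferJoinStyle.mitre)

# recommended API: the sign of the distance picks the side
left_new = line.offset_curve(2, join_style=BufferJoinStyle.mitre)    # left
right_new = line.offset_curve(-2, join_style=BufferJoinStyle.mitre)  # right

print(left_old.equals(left_new))  # True for this simple line
```

One caveat worth knowing: `parallel_offset` reverses vertex order for right-side offsets, while `offset_curve` keeps the input direction, so code that depends on line orientation can behave differently after migrating.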
- if __name__ == '__main__':
-     start_time = time.time()
-     print('Starting Dynamic Canopy Threshold calculation processing\n @ {}'.format(
-         time.strftime("%d %b %Y %H:%M:%S", time.localtime())))
-
-     parser = argparse.ArgumentParser()
-     parser.add_argument('-i', '--input', type=json.loads)
-     parser.add_argument('-p', '--processes')
-     parser.add_argument('-v', '--verbose')
-     args = parser.parse_args()
-     args.input['full_step'] = False
-
-     verbose = True if args.verbose == 'True' else False
-     main_canopy_threshold_relative(print, **args.input, processes=int(args.processes), verbose=verbose)
-
-     print('%{}'.format(100))
-     print('Finishing Dynamic Canopy Threshold calculation @ {}\n(or in {} second)'.format(
-         time.strftime("%d %b %Y %H:%M:%S", time.localtime()), round(time.time() - start_time, 5)))
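The removed entry point consumed its tool parameters as a single JSON blob on `-i` rather than as individual flags, with `full_step` forced to False before dispatch. A hypothetical invocation matching that contract (all paths and values are placeholders; the JSON keys mirror `main_canopy_threshold_relative`'s signature):

```python
import json
import subprocess
import sys

# placeholder parameters; full_step is omitted because the script injects it
params = {
    'in_line': 'seed_lines.shp',
    'in_chm': 'chm.tif',
    'off_ln_dist': 30,
    'canopy_percentile': 50,
    'canopy_thresh_percentage': 50,
    'tree_radius': 1.5,
    'max_line_dist': 10,
    'canopy_avoidance': 0.3,
    'exponent': 1.0,
}

subprocess.run([sys.executable, 'canopy_threshold_relative.py',
                '-i', json.dumps(params), '-p', '4', '-v', 'True'])
```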