BERATools 0.2.3__py3-none-any.whl → 0.2.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beratools/__init__.py +8 -3
- beratools/core/{algo_footprint_rel.py → algo_canopy_footprint_exp.py} +176 -139
- beratools/core/algo_centerline.py +61 -77
- beratools/core/algo_common.py +48 -57
- beratools/core/algo_cost.py +18 -25
- beratools/core/algo_dijkstra.py +37 -45
- beratools/core/algo_line_grouping.py +100 -100
- beratools/core/algo_merge_lines.py +40 -8
- beratools/core/algo_split_with_lines.py +289 -304
- beratools/core/algo_vertex_optimization.py +25 -46
- beratools/core/canopy_threshold_relative.py +755 -0
- beratools/core/constants.py +8 -9
- beratools/{tools → core}/line_footprint_functions.py +411 -258
- beratools/core/logger.py +18 -2
- beratools/core/tool_base.py +17 -75
- beratools/gui/assets/BERALogo.ico +0 -0
- beratools/gui/assets/BERA_Splash.gif +0 -0
- beratools/gui/assets/BERA_WizardImage.png +0 -0
- beratools/gui/assets/beratools.json +475 -2171
- beratools/gui/bt_data.py +585 -234
- beratools/gui/bt_gui_main.py +129 -91
- beratools/gui/main.py +4 -7
- beratools/gui/tool_widgets.py +530 -354
- beratools/tools/__init__.py +0 -7
- beratools/tools/{line_footprint_absolute.py → canopy_footprint_absolute.py} +81 -56
- beratools/tools/canopy_footprint_exp.py +113 -0
- beratools/tools/centerline.py +30 -37
- beratools/tools/check_seed_line.py +127 -0
- beratools/tools/common.py +65 -586
- beratools/tools/{line_footprint_fixed.py → ground_footprint.py} +140 -117
- beratools/tools/line_footprint_relative.py +64 -35
- beratools/tools/tool_template.py +48 -40
- beratools/tools/vertex_optimization.py +20 -34
- beratools/utility/env_checks.py +53 -0
- beratools/utility/spatial_common.py +210 -0
- beratools/utility/tool_args.py +138 -0
- beratools-0.2.4.dist-info/METADATA +134 -0
- beratools-0.2.4.dist-info/RECORD +50 -0
- {beratools-0.2.3.dist-info → beratools-0.2.4.dist-info}/WHEEL +1 -1
- beratools-0.2.4.dist-info/entry_points.txt +3 -0
- beratools-0.2.4.dist-info/licenses/LICENSE +674 -0
- beratools/core/algo_tiler.py +0 -428
- beratools/gui/__init__.py +0 -11
- beratools/gui/batch_processing_dlg.py +0 -513
- beratools/gui/map_window.py +0 -162
- beratools/tools/Beratools_r_script.r +0 -1120
- beratools/tools/Ht_metrics.py +0 -116
- beratools/tools/batch_processing.py +0 -136
- beratools/tools/canopy_threshold_relative.py +0 -672
- beratools/tools/canopycostraster.py +0 -222
- beratools/tools/fl_regen_csf.py +0 -428
- beratools/tools/forest_line_attributes.py +0 -408
- beratools/tools/line_grouping.py +0 -45
- beratools/tools/ln_relative_metrics.py +0 -615
- beratools/tools/r_cal_lpi_elai.r +0 -25
- beratools/tools/r_generate_pd_focalraster.r +0 -101
- beratools/tools/r_interface.py +0 -80
- beratools/tools/r_point_density.r +0 -9
- beratools/tools/rpy_chm2trees.py +0 -86
- beratools/tools/rpy_dsm_chm_by.py +0 -81
- beratools/tools/rpy_dtm_by.py +0 -63
- beratools/tools/rpy_find_cellsize.py +0 -43
- beratools/tools/rpy_gnd_csf.py +0 -74
- beratools/tools/rpy_hummock_hollow.py +0 -85
- beratools/tools/rpy_hummock_hollow_raster.py +0 -71
- beratools/tools/rpy_las_info.py +0 -51
- beratools/tools/rpy_laz2las.py +0 -40
- beratools/tools/rpy_lpi_elai_lascat.py +0 -466
- beratools/tools/rpy_normalized_lidar_by.py +0 -56
- beratools/tools/rpy_percent_above_dbh.py +0 -80
- beratools/tools/rpy_points2trees.py +0 -88
- beratools/tools/rpy_vegcoverage.py +0 -94
- beratools/tools/tiler.py +0 -48
- beratools/tools/zonal_threshold.py +0 -144
- beratools-0.2.3.dist-info/METADATA +0 -108
- beratools-0.2.3.dist-info/RECORD +0 -74
- beratools-0.2.3.dist-info/entry_points.txt +0 -2
- beratools-0.2.3.dist-info/licenses/LICENSE +0 -22
|
@@ -1,222 +0,0 @@
|
|
|
1
|
-
import os
|
|
2
|
-
import sys
|
|
3
|
-
import time
|
|
4
|
-
import numpy as np
|
|
5
|
-
from numpy.lib.stride_tricks import as_strided
|
|
6
|
-
|
|
7
|
-
import rasterio
|
|
8
|
-
import xarray as xr
|
|
9
|
-
from xrspatial import convolution, focal
|
|
10
|
-
from scipy import ndimage
|
|
11
|
-
|
|
12
|
-
from common import *
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
# TODO: Rolling Statistics for grid data... an alternative
|
|
16
|
-
# by Dan Patterson
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
def _check(a, r_c, subok=False):
|
|
20
|
-
"""Performs the array checks necessary for stride and block.
|
|
21
|
-
: in_array - Array or list.
|
|
22
|
-
: r_c - tuple/list/array of rows x cols.
|
|
23
|
-
: subok - from numpy 1.12 added, keep for now
|
|
24
|
-
:Returns:
|
|
25
|
-
:------
|
|
26
|
-
:Attempts will be made to ...
|
|
27
|
-
: produce a shape at least (1*c). For a scalar, the
|
|
28
|
-
: minimum shape will be (1*r) for 1D array or (1*c) for 2D
|
|
29
|
-
: array if r<c. Be aware
|
|
30
|
-
"""
|
|
31
|
-
if isinstance(r_c, (int, float)):
|
|
32
|
-
r_c = (1, int(r_c))
|
|
33
|
-
|
|
34
|
-
r, c = r_c
|
|
35
|
-
if a.ndim == 1:
|
|
36
|
-
a = np.atleast_2d(a)
|
|
37
|
-
|
|
38
|
-
r, c = r_c = (min(r, a.shape[0]), min(c, a.shape[1]))
|
|
39
|
-
a = np.array(a, copy=False, subok=subok)
|
|
40
|
-
return a, r, c, tuple(r_c)
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
def _pad(in_array, kernel):
|
|
44
|
-
"""Pad a sliding array to allow for stats"""
|
|
45
|
-
pad_x = int(kernel.shape[0] / 2)
|
|
46
|
-
pad_y = int(kernel.shape[0] / 2)
|
|
47
|
-
result = np.pad(in_array, pad_width=(pad_x, pad_y), mode="constant", constant_values=(np.NaN, np.NaN))
|
|
48
|
-
|
|
49
|
-
return result
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
def stride(a, r_c):
    """Return a 2D sliding/moving window view of *a*.

    No edge correction is applied; callers pad beforehand if full coverage
    is needed.

    :param a: array or list, normally 2D (validated via ``_check``).
    :param r_c: window size as (rows, cols); a scalar means (1, int(r_c)).
    :returns: strided view whose trailing two axes hold each window,
        squeezed of unit dimensions.
    """
    arr, n_rows, n_cols, win_shape = _check(a, r_c)
    # One output position per fully-contained window, then the window itself.
    out_shape = (arr.shape[0] - n_rows + 1, arr.shape[1] - n_cols + 1) + win_shape
    # Duplicating the strides makes the trailing axes walk the same memory.
    out_strides = arr.strides + arr.strides
    windows = as_strided(arr, shape=out_shape, strides=out_strides)
    return windows.squeeze()
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
def normalize_chm(raster):
    """Clamp negative CHM cells to zero; non-negative cells pass through."""
    clamped = np.where(raster >= 0, raster, 0)
    return clamped
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
def np_cc_map(out_canopy_r, chm, in_array, min_ht):
    """Write the canopy-closure (CC) raster and return its array.

    Cells strictly above *min_ht* become 1.0, others 0.0; masked cells are
    filled with the CHM nodata value before writing.

    :param out_canopy_r: path of the canopy raster to write.
    :param chm: open rasterio dataset supplying profile and nodata.
    :param in_array: masked CHM band array.
    :param min_ht: canopy height threshold.
    :returns: the filled canopy ndarray.
    """
    print('Generating Canopy Closure Raster ...')

    # NOTE(review): a strict '>' is used (an '>=' variant existed upstream as
    # a commented-out line) — confirm the threshold is meant to be exclusive.
    canopy_ndarray = np.ma.where(in_array > min_ht, 1., 0.).astype(float)
    canopy_ndarray = np.ma.filled(canopy_ndarray, chm.nodata)
    try:
        # Context manager guarantees the dataset is closed even when write()
        # raises; the original leaked the handle on error.
        with rasterio.open(out_canopy_r, 'w', **chm.profile) as write_canopy:
            write_canopy.write(canopy_ndarray, 1)
        print('Generating Canopy Closure (CC) Raster ... Done')
    except Exception as e:
        # Report the actual exception rather than the raw sys.exc_info tuple.
        print(e)
    del in_array
    return canopy_ndarray
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
def fs_raster(in_ndarray, kernel):
    """Focal mean and standard deviation via a strided sliding window.

    Pads the input with NaN, builds a windowed view, masks cells outside
    the (binary) kernel, then reduces each window with nan-aware statistics.

    :param in_ndarray: 2D canopy-closure array.
    :param kernel: 2D binary kernel (1 = inside the focal window).
    :returns: tuple ``(mean_result, stdev_result)``.
    """
    print('Generating Canopy Closure Focal Statistic ...')
    padded_array = _pad(in_ndarray, kernel)
    a_s = stride(padded_array, kernel.shape)

    # TODO: np.where on large ndarray fail (allocate memory error)
    # np.nan replaces np.NaN, which was removed in NumPy 2.0.
    a_s_masked = np.where(kernel == 1, a_s, np.nan)
    print("Calculating Canopy Closure's Focal Statistic-Mean ...")
    mean_result = np.nanmean(a_s_masked, axis=(2, 3))
    print("Calculating Canopy Closure's Focal Statistic-Stand Deviation Raster ...")
    stdev_result = np.nanstd(a_s_masked, axis=(2, 3))
    del a_s
    return mean_result, stdev_result
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
def fs_raster_stdmean(in_ndarray, kernel, nodata):
    """Focal std/mean computed with xrspatial (handles large data, slowly).

    NoData cells are replaced with NaN *in place* before the focal pass.

    :param in_ndarray: 2D canopy-closure array (mutated: nodata -> NaN).
    :param kernel: focal kernel accepted by xrspatial.
    :param nodata: nodata sentinel value to mask out.
    :returns: tuple ``(std_array, mean_array)`` shaped like the input.
    """
    in_ndarray[in_ndarray == nodata] = np.nan
    stats = focal.focal_stats(xr.DataArray(in_ndarray), kernel, stats_funcs=['std', 'mean'])

    # Flatten then re-shape each statistic back onto the input grid.
    rows, cols = in_ndarray.shape[0], in_ndarray.shape[1]
    std_flat = stats[0].data.reshape(-1)
    mean_flat = stats[1].data.reshape(-1)
    std_ndarray = std_flat.reshape(rows, cols)
    mean_ndarray = mean_flat.reshape(rows, cols)
    return std_ndarray, mean_ndarray
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
def smooth_cost(in_raster, search_dist, sampling):
    """Build a normalized proximity-to-canopy cost surface.

    Computes the Euclidean distance from every cell to the nearest canopy
    cell (non-zero in *in_raster*), inverts it against *search_dist*, clamps
    at zero and rescales to [0, 1]. Very large rasters are staged through a
    disk-backed memmap to bound memory use.

    :param in_raster: binary canopy array (non-zero = canopy).
    :param search_dist: distance beyond which the contribution is 0.
    :param sampling: per-axis cell size passed to the distance transform.
    :returns: float array of smoothed costs in [0, 1].
    """
    print('Generating Cost Raster ...')
    from tempfile import mkdtemp
    import os.path as path
    import shutil

    n_rows, n_cols = in_raster.shape
    if n_rows * n_cols >= 30000 * 30000:
        # Too large for RAM: stage a copy through a temporary memmap on disk.
        mmap_file = path.join(mkdtemp(), 'tempmmemap.dat')
        staged = np.memmap(mmap_file, in_raster.dtype, 'w+', shape=in_raster.shape)
        staged[:] = in_raster[:]
        staged.flush()
        euc_dist_array = ndimage.distance_transform_edt(np.logical_not(staged), sampling=sampling)
        del staged, in_raster
        shutil.rmtree(path.dirname(mmap_file))
    else:
        euc_dist_array = ndimage.distance_transform_edt(np.logical_not(in_raster), sampling=sampling)

    remaining = float(search_dist) - euc_dist_array
    remaining[remaining <= 0.0] = 0.0
    return remaining / float(search_dist)
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
def np_cost_raster(canopy_ndarray, cc_mean, cc_std, cc_smooth, chm, avoidance, cost_raster_exponent, out_cost_r):
    """Combine focal statistics and proximity into the final cost raster.

    Blends a normalized (mean - std)/(mean + std) ratio with the smoothed
    proximity surface (weighted by *avoidance*), forces canopy cells to a
    cost of 1, applies exp() and the configured exponent, then writes the
    result as a single-band GTiff to *out_cost_r*.

    :returns: None (output is written to disk).
    """
    print('Generating Smoothed Cost Raster ...')
    diff_ms = cc_mean - cc_std
    sum_ms = cc_mean + cc_std
    # Guard against division by zero where mean + std == 0.
    ratio = np.divide(diff_ms, sum_ms, where=sum_ms != 0, out=np.zeros(diff_ms.shape, dtype=float))
    base = (1 + ratio) / 2
    # Zero the cost where the focal window saw no canopy at all.
    # (The original recomputed cc_mean + cc_std here as 'aaM'.)
    base = np.where(sum_ms <= 0, 0, base)
    blended = base * (1 - avoidance) + (cc_smooth * avoidance)
    # Canopy cells are always maximal before exponentiation.
    blended = np.where(canopy_ndarray == 1, 1, blended)
    result = np.power(np.exp(blended), float(cost_raster_exponent))
    # Context manager guarantees the dataset is closed even when write()
    # raises; the original leaked the handle on error.
    with rasterio.open(out_cost_r, 'w+', driver='GTiff', height=chm.shape[0], width=chm.shape[1],
                       count=1, dtype=chm.read(1).dtype, crs=chm.crs, transform=chm.transform) as write_cost:
        write_cost.write(result, 1)
    print('Generating Smoothed Cost Raster ... Done')
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
# TODO: deal with NODATA
|
|
172
|
-
def canopy_cost_raster(callback, in_chm, canopy_ht_threshold, tree_radius, max_line_dist,
                       canopy_avoidance, exponent, out_canopy, out_cost, processes, verbose):
    """End-to-end driver: CHM -> canopy raster -> focal stats -> cost raster.

    :param callback: progress callback (unused; prints are used instead).
    :param in_chm: path to the input canopy height model raster.
    :param canopy_ht_threshold: height above which a cell counts as canopy.
    :param tree_radius: focal window radius in map units.
    :param max_line_dist: search distance for the proximity smoothing.
    :param canopy_avoidance: blend weight in [0, 1] for the proximity term.
    :param exponent: exponent applied to the cost surface.
    :param out_canopy: output canopy raster path.
    :param out_cost: output cost raster path.
    :param processes: worker count (unused in this tool).
    :param verbose: verbosity flag (unused in this tool).
    """
    canopy_ht_threshold = float(canopy_ht_threshold)
    tree_radius = float(tree_radius)
    max_line_dist = float(max_line_dist)
    canopy_avoidance = float(canopy_avoidance)
    cost_raster_exponent = float(exponent)

    print('In CHM: ' + in_chm)
    # BUG FIX: the original never closed the CHM dataset; the context
    # manager guarantees release even on error.
    with rasterio.open(in_chm) as chm:
        (cell_x, cell_y) = chm.res

        print('Loading CHM ...')
        band1_ndarray = chm.read(1, masked=True)
        print('%{}'.format(10))

        print('Preparing Kernel window ...')
        kernel = convolution.circle_kernel(cell_x, cell_y, tree_radius)
        print('%{}'.format(20))

        # Generate Canopy Raster and return the Canopy array
        canopy_ndarray = np_cc_map(out_canopy, chm, band1_ndarray, canopy_ht_threshold)
        print('%{}'.format(40))
        print('Apply focal statistic on raster ...')

        # The strided implementation is fast but memory hungry; it is only
        # used for coarse (> 1 unit) cells, otherwise fall back to xrspatial.
        if cell_y > 1 and cell_x > 1:
            cc_mean, cc_std = fs_raster(canopy_ndarray, kernel)
        else:
            cc_std, cc_mean = fs_raster_stdmean(canopy_ndarray, kernel, chm.nodata)
        print('%{}'.format(60))
        print('Apply focal statistic on raster ... Done')

        # Smoothing raster
        cc_smooth = smooth_cost(canopy_ndarray, max_line_dist, [cell_x, cell_y])
        # Clamp the avoidance weight into [0, 1].
        avoidance = max(min(float(canopy_avoidance), 1), 0)
        np_cost_raster(canopy_ndarray, cc_mean, cc_std, cc_smooth, chm, avoidance, cost_raster_exponent, out_cost)
    print('%{}'.format(100))
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
if __name__ == '__main__':
    # Parse the shared BERA tool arguments, run the driver, and report
    # elapsed wall-clock time.
    in_args, in_verbose = check_arguments()

    start_time = time.time()
    started_at = time.strftime("%d %b %Y %H:%M:%S", time.localtime())
    print('Starting Canopy and Cost Raster processing\n @ {}'.format(started_at))

    canopy_cost_raster(print, **in_args.input, processes=int(in_args.processes), verbose=in_verbose)

    elapsed = round(time.time() - start_time, 5)
    print('Canopy and Cost Raster processing is done in {} seconds)'.format(elapsed))
|
beratools/tools/fl_regen_csf.py
DELETED
|
@@ -1,428 +0,0 @@
|
|
|
1
|
-
import math
|
|
2
|
-
import time
|
|
3
|
-
import pandas
|
|
4
|
-
import geopandas
|
|
5
|
-
import numpy
|
|
6
|
-
import scipy
|
|
7
|
-
import os
|
|
8
|
-
import pyogrio
|
|
9
|
-
import shapely
|
|
10
|
-
from shapely.ops import unary_union, split
|
|
11
|
-
from rasterio import mask
|
|
12
|
-
import argparse
|
|
13
|
-
import json
|
|
14
|
-
from multiprocessing.pool import Pool
|
|
15
|
-
|
|
16
|
-
from common import *
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
class OperationCancelledException(Exception):
    """Raised to abort a multiprocessing run when the user cancels."""
    pass
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
def _clip_change_mean(in_change, polygon):
    """Mean of the change raster inside *polygon*, with NoData masked out."""
    with rasterio.open(in_change) as in_change_file:
        # Clip the change raster to the buffer/footprint polygon.
        clipped_change, out_transform = rasterio.mask.mask(in_change_file, [polygon], crop=True)
        # Drop the band axis to get a 2D array.
        clipped_change = numpy.squeeze(clipped_change, axis=0)
        # Mask all NoData cells before reducing.
        clean_change = numpy.ma.masked_where(clipped_change == in_change_file.nodata, clipped_change)
    return numpy.nanmean(clean_change)


def _regen_class(trees_density, change_mean, change_available):
    """Regeneration class from tree density, with the change mean as fallback."""
    if trees_density >= 0.6:
        return "Advanced"
    if 0.2 < trees_density < 0.6:
        return "Regenerating"
    if not change_available:
        return "Not Available"
    # Low tree density: decide from the mean change value.
    return "Regenerating" if change_mean > 0.06 else "Arrested"


def _max_line_width(attr_seg_line, result_identity, has_footprint):
    """Estimated line width: AvgWidth column, else derived from footprint geometry."""
    if "AvgWidth" in attr_seg_line.columns.array:
        # NOTE(review): math.ceil on a one-row Series relies on implicit
        # float conversion — confirm against the pandas version in use.
        width = math.ceil(attr_seg_line["AvgWidth"])
    elif has_footprint:
        # estimate width = (Perimeter - sqrt(Perimeter^2 - 16*Area)) / 4
        # for long and skinny shapes: estimate width = 2*Area / Perimeter
        perimeter = float(result_identity.geometry.length)
        area = float(result_identity.geometry.area)
        width = math.ceil((2 * area) / perimeter)
    else:
        return 0.5
    return width if width >= 1.0 else 0.5


def regen_csf(line_args):
    """Classify the regeneration state of one line segment.

    *line_args* is ``[attr_seg_line, result_identity, area_analysis,
    change_analysis, in_change, in_tree]``. Appends AveChanges, Num_trees,
    trees_density and Reg_Class columns to the segment and returns it, or
    None for an empty segment.
    """
    attr_seg_line = line_args[0]
    result_identity = line_args[1]
    # line_args[2] (area_analysis) is carried through but never used here.
    change_analysis = line_args[3]
    in_change = line_args[4]
    in_tree = line_args[5]

    has_footprint = True
    if isinstance(result_identity, geopandas.geodataframe.GeoDataFrame):
        if result_identity.empty:
            has_footprint = False
        else:
            # Merge all identity pieces into one footprint.
            result_identity = result_identity.dissolve()
    elif not result_identity:
        has_footprint = False

    # An empty segment cannot be classified.
    if attr_seg_line.empty:
        return None

    max_ln_width = _max_line_width(attr_seg_line, result_identity, has_footprint)

    if change_analysis and has_footprint:
        fp = result_identity.iloc[0].geometry
        line_feat = attr_seg_line.iloc[0].geometry
        if shapely.is_empty(fp):
            # No footprint geometry: fall back to a narrow line buffer.
            line_buffer = shapely.buffer(line_feat, float(max_ln_width) / 4)
        else:
            # Footprint geometry doubles as the buffer area.
            line_buffer = fp
        change_mean = _clip_change_mean(in_change, line_buffer)
        trees_counts = len(in_tree[in_tree.within(line_buffer)])
        trees_density = trees_counts / line_buffer.area
        reg_class = _regen_class(trees_density, change_mean, True)
    elif change_analysis:
        # Change raster available but no footprint: buffer the line itself.
        line_feat = attr_seg_line.geometry.iloc[0]
        line_buffer = shapely.buffer(line_feat, float(max_ln_width))
        change_mean = _clip_change_mean(in_change, line_buffer)
        trees_counts = len(in_tree[in_tree.within(line_buffer)])
        trees_density = trees_counts / line_buffer.area
        reg_class = _regen_class(trees_density, change_mean, True)
    else:
        # No change raster: classify from tree density alone.
        # BUG FIX: the original ended with an
        # 'elif not change_analysis and not has_footprint' branch that was
        # unreachable because the preceding 'or' branch always matched
        # first; the dead branch is removed without changing reachable
        # behaviour.
        line_feat = attr_seg_line.geometry.iloc[0]
        line_buffer = shapely.buffer(line_feat, float(max_ln_width))
        trees_counts = len(in_tree[in_tree.within(line_buffer)])
        trees_density = trees_counts / line_buffer.area
        reg_class = _regen_class(trees_density, None, False)
        change_mean = numpy.nan

    attr_seg_line["AveChanges"] = change_mean
    attr_seg_line["Num_trees"] = trees_counts
    attr_seg_line["trees_density"] = trees_density
    attr_seg_line["Reg_Class"] = reg_class
    return attr_seg_line
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
def identity_polygon(line_args):
    """Overlay one line's search polygon against the footprints it touches.

    *line_args* is ``[line, touched_footprints, search_polygon]``. Missing
    OLnFID/OLnSEG columns on the search polygon are synthesized before the
    identity overlay.

    :returns: tuple ``(line, identity)`` where identity is a GeoDataFrame
        with OLnFID/OLnSEG columns, or None when the overlay failed or the
        search polygon was empty.
    """
    line = line_args[0]
    in_touched_fp = line_args[1][['geometry', 'OLnFID', 'OLnSEG']]
    in_search_polygon = line_args[2]
    if 'OLnSEG' not in in_search_polygon.columns.array:
        in_search_polygon = in_search_polygon.assign(OLnSEG=0)
    if 'OLnFID' not in in_search_polygon.columns.array:
        # BUG FIX: the original read in_search_polygon['OLnFID'].index here,
        # which raises KeyError precisely because the column is absent; the
        # frame's own index is what was intended.
        in_search_polygon = in_search_polygon.assign(OLnFID=in_search_polygon.index)
    identity = None
    try:
        # TODO: determine when there is empty polygon
        # TODO: this will produce empty identity
        if not in_search_polygon.empty:
            identity = in_search_polygon.overlay(in_touched_fp, how='identity')
            identity = identity.dropna(subset=['OLnSEG_2', 'OLnFID_2'])
            identity = identity.drop(columns=['OLnSEG_1', 'OLnFID_2'])
            identity = identity.rename(columns={'OLnFID_1': 'OLnFID', 'OLnSEG_2': 'OLnSEG'})
    except Exception as e:
        # Best-effort: report and fall through with identity=None so the
        # caller can still pair the line with an empty result.
        print(e)

    return line, identity
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
def execute_multiprocessing_identity(line_args, processes):
    """Run identity_polygon over *line_args* with a worker pool.

    Results are collected in completion order; a percentage is printed
    after each task completes.
    """
    try:
        total_steps = len(line_args)
        results = []
        with Pool(processes) as pool:
            done = 0
            # Tasks are submitted in order; results arrive as each worker
            # finishes (imap_unordered).
            for outcome in pool.imap_unordered(identity_polygon, line_args):
                if BT_DEBUGGING:
                    print('Got result: {}'.format(outcome), flush=True)
                results.append(outcome)
                done += 1
                print('%{}'.format(done / total_steps * 100))

    except OperationCancelledException:
        print("Operation cancelled")
        exit()

    print("Identifies are done.")
    return results
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
def execute_multiprocessing_csf(line_args, processes):
    """Run regen_csf over *line_args* with a worker pool, printing progress."""
    try:
        total_steps = len(line_args)
        results = []
        with Pool(processes) as pool:
            done = 0
            # Tasks are submitted in order; results arrive as each worker
            # finishes (imap_unordered).
            for outcome in pool.imap_unordered(regen_csf, line_args):
                if BT_DEBUGGING:
                    print('Got result: {}'.format(outcome), flush=True)
                    print('Line processed: {}'.format(done))

                results.append(outcome)
                done += 1
                print('%{}'.format(done / total_steps * 100))

    except OperationCancelledException:
        print("Operation cancelled")
        exit()

    return results
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
def fl_restration_csf(callback, in_line, in_footprint, in_trees, in_change, proc_segments, out_line, processes,
                      verbose):
    """Classify forest-line regeneration and write attributed lines.

    Loads the line, tree and footprint shapefiles plus a change raster,
    validates datum/projection agreement, pairs each (optionally segmented)
    line with the footprints it touches, runs the regeneration classifier,
    and writes the combined attributed lines to *out_line*.

    :param callback: progress callback (unused; progress is printed).
    :param in_line: path to the input line shapefile.
    :param in_footprint: path to the footprint shapefile.
    :param in_trees: path to the tree point shapefile.
    :param in_change: path to the change raster.
    :param proc_segments: if truthy, split lines into segments first.
    :param out_line: output shapefile path.
    :param processes: worker count for the multiprocessing stages.
    :param verbose: verbosity flag (unused).
    """
    # assign Tool arguments
    # NOTE(review): this local shadows any module-level BT_DEBUGGING flag.
    BT_DEBUGGING = False
    in_cl = in_line
    # NOTE(review): in_fp is assigned but never used below.
    in_fp = in_footprint

    print("Checking input parameters ...")

    try:
        print("loading shapefile(s) ...")
        in_line_shp = pyogrio.read_dataframe(in_line)
        in_tree_shp = pyogrio.read_dataframe(in_trees)
        in_fp_shp = pyogrio.read_dataframe(in_footprint)
    except SystemError:
        print("Invalid input feature, please check!")
        exit()

    # Check datum, at this stage only check input data against NAD 83 datum
    print("Checking datum....")
    sameDatum = False
    # NOTE(review): each iteration overwrites sameDatum, so only the LAST
    # shapefile's datum actually decides the outcome — confirm whether
    # all three were meant to be checked (e.g. with all(...)).
    for shp in [in_line_shp, in_tree_shp, in_fp_shp]:
        if shp.crs.datum.name in NADDatum:
            sameDatum = True
        else:
            sameDatum = False
    try:
        # Check projection zone among input data with NAD 83 datum
        if sameDatum:
            # NOTE(review): chained != only compares adjacent pairs; line vs
            # footprint zones are never compared directly.
            if in_line_shp.crs.utm_zone != in_tree_shp.crs.utm_zone != in_fp_shp.crs.utm_zone:
                print("Input shapefiles are on different project Zone, please check.")
                exit()
        else:
            print("Input shapefiles are not on supported Datum, please check.")
            exit()
    except Exception as error_in_shapefiles:
        print("Input shapefiles are invalid: {} , please check.".format(error_in_shapefiles))
        exit()

    # Ensure the output directory exists before any processing starts.
    if not os.path.exists(os.path.dirname(out_line)):
        os.makedirs(os.path.dirname(out_line))
    else:
        pass
    print("Checking input parameters ... Done")

    # NOTE(review): in_fields is computed but never used.
    in_fields = list(in_line_shp.columns)

    # check coordinate systems between line and raster features
    try:
        # Check projection zone among input raster with input vector data
        with rasterio.open(in_change) as in_raster:
            # 2956 (NAD83 CSRS / UTM 12N) is accepted as an extra fallback EPSG.
            if not in_raster.crs.to_epsg() in [in_fp_shp.crs.to_epsg(), in_line_shp.crs.to_epsg(),
                                               in_tree_shp.crs.to_epsg(), 2956]:
                print("Line and raster spatial references are different , please check.")
                exit()
            else:
                change_analysis = True
    except Exception as error_in_raster:

        # NOTE(review): change_analysis=False is set but exit() makes the
        # fallback unreachable — confirm whether a raster-less run was
        # meant to continue.
        print("Invalid input raster: {}, please check!".format(error_in_raster))
        change_analysis = False
        exit()

    # NOTE(review): HasOLnFID is assigned but never used.
    HasOLnFID = False

    # determine to do area or/and height analysis
    if len(in_fp_shp) == 0:
        print('No footprints provided, buffer of the input lines will be used instead')
        area_analysis = False
    else:
        area_analysis = True

    print("Preparing line segments...")

    # Segment lines
    # Return split lines with two extra columns:['OLnFID','OLnSEG']
    # or return whole input line
    print("Input_Lines: {}".format(in_cl))
    if not 'OLnFID' in in_line_shp.columns.array:
        print(
            "Cannot find {} column in input line data.\n '{}' column will be create".format('OLnFID', 'OLnFID'))
        in_line_shp['OLnFID'] = in_line_shp.index
    if proc_segments:
        # line_split2 comes from the star-import of common — splits lines
        # into ~10 m segments (presumably; verify against common.py).
        attr_seg_lines = line_split2(in_line_shp, 10)
    else:
        # copy original line input to another Geodataframe
        attr_seg_lines = geopandas.GeoDataFrame.copy(in_line_shp)
    if not "OLnSEG" in attr_seg_lines.columns.array:
        if proc_segments:
            attr_seg_lines["OLnSEG"] = int(attr_seg_lines["OLnSEG"])
        else:
            attr_seg_lines["OLnSEG"] = 0
    print('%{}'.format(10))

    print("Line segments preparation done.")
    print("{} footprints to be identified by {} segments ...".format(len(in_fp_shp.index), len(attr_seg_lines)))

    # Prepare line parameters for multiprocessing
    line_args = []

    # prepare line args: list of line, line buffer and footprint polygon
    # footprint spatial searching
    footprint_sindex = in_fp_shp.sindex

    for i in attr_seg_lines.index:
        line = attr_seg_lines.iloc[[i]]
        line_buffer = line.copy()
        if proc_segments:
            # Simplify before buffering to keep segment buffers cheap.
            line_buffer['geometry'] = line.simplify(tolerance=1, preserve_topology=True).buffer(10,
                                                    cap_style=shapely.BufferCapStyle.flat)
        else:
            line_buffer['geometry'] = line.buffer(10, cap_style=shapely.BufferCapStyle.flat)
        # Footprints whose geometry overlaps this line's buffer.
        fp_touched = in_fp_shp.iloc[
            footprint_sindex.query(line_buffer.iloc[0].geometry, predicate="overlaps", sort=True)]
        if not "OLnFID" in fp_touched.columns.array:
            fp_touched["OLnFID"] = int(line["OLnFID"])
        if not "OLnSEG" in fp_touched.columns.array:
            if proc_segments:
                fp_touched["OLnSEG"] = int(line["OLnSEG"])
            else:
                fp_touched["OLnSEG"] = 0
        # Merge touched footprints, clip to the buffer, and re-union pieces.
        fp_intersected = fp_touched.dissolve()
        fp_intersected.geometry = fp_intersected.geometry.clip(line_buffer)
        fp_intersected['geometry'] = fp_intersected.geometry.map(lambda x: unary_union(x))
        list_item = [line, fp_touched, fp_intersected]

        line_args.append(list_item)
    print("Identifying footprint.... ")
    # multiprocessing of identity polygons
    features = []
    if not BT_DEBUGGING:
        features = execute_multiprocessing_identity(line_args, processes)
    else:
        # Debug use
        for index in range(0, len(line_args)):
            result = (identity_polygon(line_args[index]))
            if not len(result) == 0:
                features.append(result)

    print("Prepare for classify ...")

    # prepare list of result_identity, Att_seg_lines, areaAnalysis, heightAnalysis, args.input
    AOI_trees = in_tree_shp
    line_args = []
    for index in range(0, len(features)):
        list_item = [features[index][0], features[index][1], area_analysis, change_analysis, in_change, AOI_trees]
        line_args.append(list_item)

    # Linear attributes
    print("Classify lines ...")
    print('%{}'.format(60))

    # Multiprocessing regeneration classifying
    features = []
    # NOTE(review): BT_DEBUGGING is forced True here, so the
    # multiprocessing path below can never run — looks like leftover
    # debugging; confirm before shipping.
    BT_DEBUGGING = True
    if not BT_DEBUGGING:
        features = execute_multiprocessing_csf(line_args, processes)
    else:
        # Debug use
        # NOTE(review): range stops at len(line_args) - 1, silently
        # skipping the LAST segment — looks like an off-by-one; confirm.
        for index in range(0, len(line_args) - 1):
            result = (regen_csf(line_args[index]))
            # NOTE(review): regen_csf may return None, in which case
            # len(result) raises TypeError — confirm intended handling.
            if not len(result) == 0:
                features.append(result)
            print(result)

    # Combine identity polygons into once geodataframe
    if len(features) == 0:
        print('No lines found.')
        exit()
    print('Appending output ...')
    result_attr = geopandas.GeoDataFrame(pandas.concat(features, ignore_index=True))
    # NOTE(review): reset_index() returns a new frame; the result is
    # discarded here, so this line is a no-op without inplace=True.
    result_attr.reset_index()

    print('%{}'.format(90))
    print('Saving output ...')

    # Save attributed lines, was output_att_line
    geopandas.GeoDataFrame.to_file(result_attr, out_line)

    print('%{}'.format(100))
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
if __name__ == '__main__':
    # Time the whole run and echo start/finish timestamps.
    start_time = time.time()
    started = time.strftime("%b %Y %H:%M:%S", time.localtime())
    print('Line regeneration classify started at {}'.format(started))

    # Get tool arguments

    in_args, in_verbose = check_arguments()
    fl_restration_csf(print, **in_args.input, processes=int(in_args.processes), verbose=in_verbose)

    now = time.strftime("%d %b %Y %H:%M:%S", time.localtime())
    print('Current time: {}'.format(now))
    elapsed = round(time.time() - start_time, 5)
    print('Line regeneration classify done in {} seconds'.format(elapsed))
|