yirgacheffe 1.2.0__py3-none-any.whl
This diff shows the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- yirgacheffe/__init__.py +17 -0
- yirgacheffe/backends/__init__.py +13 -0
- yirgacheffe/backends/enumeration.py +33 -0
- yirgacheffe/backends/mlx.py +156 -0
- yirgacheffe/backends/numpy.py +110 -0
- yirgacheffe/constants.py +1 -0
- yirgacheffe/h3layer.py +2 -0
- yirgacheffe/layers/__init__.py +44 -0
- yirgacheffe/layers/area.py +91 -0
- yirgacheffe/layers/base.py +265 -0
- yirgacheffe/layers/constant.py +41 -0
- yirgacheffe/layers/group.py +357 -0
- yirgacheffe/layers/h3layer.py +203 -0
- yirgacheffe/layers/rasters.py +333 -0
- yirgacheffe/layers/rescaled.py +94 -0
- yirgacheffe/layers/vectors.py +380 -0
- yirgacheffe/operators.py +738 -0
- yirgacheffe/rounding.py +57 -0
- yirgacheffe/window.py +141 -0
- yirgacheffe-1.2.0.dist-info/METADATA +473 -0
- yirgacheffe-1.2.0.dist-info/RECORD +25 -0
- yirgacheffe-1.2.0.dist-info/WHEEL +5 -0
- yirgacheffe-1.2.0.dist-info/entry_points.txt +2 -0
- yirgacheffe-1.2.0.dist-info/licenses/LICENSE +7 -0
- yirgacheffe-1.2.0.dist-info/top_level.txt +1 -0
yirgacheffe/operators.py
ADDED
@@ -0,0 +1,738 @@
import logging
import multiprocessing
import sys
import time
import types
from enum import Enum
from multiprocessing import Semaphore, Process
from multiprocessing.managers import SharedMemoryManager
from typing import Optional

import numpy as np
from osgeo import gdal
from dill import dumps, loads

from . import constants
from .rounding import are_pixel_scales_equal_enough, round_up_pixels, round_down_pixels
from .window import Area, PixelScale, Window
from .backends import backend
from .backends.enumeration import operators as op

logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)

class WindowOperation(Enum):
    NONE = 1
    UNION = 2
    INTERSECTION = 3
    LEFT = 4
    RIGHT = 5

class LayerConstant:
    def __init__(self, val):
        self.val = val

    def __str__(self):
        return str(self.val)

    def _eval(self, _area, _index, _step, _target_window):
        return self.val


class LayerMathMixin:

    def __add__(self, other):
        return LayerOperation(self, op.ADD, other, window_op=WindowOperation.UNION)

    def __sub__(self, other):
        return LayerOperation(self, op.SUB, other, window_op=WindowOperation.UNION)

    def __mul__(self, other):
        return LayerOperation(self, op.MUL, other, window_op=WindowOperation.INTERSECTION)

    def __truediv__(self, other):
        return LayerOperation(self, op.TRUEDIV, other, window_op=WindowOperation.INTERSECTION)

    def __floordiv__(self, other):
        return LayerOperation(self, op.FLOORDIV, other, window_op=WindowOperation.INTERSECTION)

    def __mod__(self, other):
        return LayerOperation(self, op.REMAINDER, other, window_op=WindowOperation.INTERSECTION)

    def __pow__(self, other):
        return LayerOperation(self, op.POW, other, window_op=WindowOperation.UNION)

    def __eq__(self, other):
        return LayerOperation(self, op.EQ, other, window_op=WindowOperation.INTERSECTION)

    def __ne__(self, other):
        return LayerOperation(self, op.NE, other, window_op=WindowOperation.UNION)

    def __lt__(self, other):
        return LayerOperation(self, op.LT, other, window_op=WindowOperation.UNION)

    def __le__(self, other):
        return LayerOperation(self, op.LE, other, window_op=WindowOperation.UNION)

    def __gt__(self, other):
        return LayerOperation(self, op.GT, other, window_op=WindowOperation.UNION)

    def __ge__(self, other):
        return LayerOperation(self, op.GE, other, window_op=WindowOperation.UNION)

    def __and__(self, other):
        return LayerOperation(self, op.AND, other, window_op=WindowOperation.INTERSECTION)

    def __or__(self, other):
        return LayerOperation(self, op.OR, other, window_op=WindowOperation.UNION)
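These operator overloads are what make layer arithmetic lazy: each arithmetic or comparison operator returns a LayerOperation node rather than reading any pixels, and window_op records whether the result should span the union or the intersection of the operands' areas. A minimal usage sketch (the file names and the RasterLayer constructor are assumptions drawn from the wider yirgacheffe API, not from this file):

from yirgacheffe.layers import RasterLayer

elevation = RasterLayer.layer_from_file("elevation.tif")  # hypothetical input
mask = RasterLayer.layer_from_file("mask.tif")            # hypothetical input

# No pixels are read yet; this just builds a small LayerOperation tree.
expr = (elevation * mask) + 1.0

# Work happens chunk by chunk only when a terminal call such as sum() or save() runs.
total = expr.sum()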

    def _eval(self, area, index, step, target_window=None):
        try:
            window = self.window if target_window is None else target_window
            return self.read_array_for_area(area, 0, index, window.xsize, step)
        except AttributeError:
            return self.read_array_for_area(area, 0, index, target_window.xsize if target_window else 1, step)

    def nan_to_num(self, nan=0, posinf=None, neginf=None):
        return LayerOperation(
            self,
            op.NAN_TO_NUM,
            window_op=WindowOperation.NONE,
            copy=False,
            nan=nan,
            posinf=posinf,
            neginf=neginf,
        )

    def isin(self, test_elements):
        return LayerOperation(
            self,
            op.ISIN,
            window_op=WindowOperation.NONE,
            test_elements=test_elements,
        )

    def log(self):
        return LayerOperation(
            self,
            op.LOG,
            window_op=WindowOperation.NONE,
        )

    def log2(self):
        return LayerOperation(
            self,
            op.LOG2,
            window_op=WindowOperation.NONE,
        )

    def log10(self):
        return LayerOperation(
            self,
            op.LOG10,
            window_op=WindowOperation.NONE,
        )

    def exp(self):
        return LayerOperation(
            self,
            op.EXP,
            window_op=WindowOperation.NONE,
        )

    def exp2(self):
        return LayerOperation(
            self,
            op.EXP2,
            window_op=WindowOperation.NONE,
        )

    def clip(self, min=None, max=None):  # pylint: disable=W0622
        # In the numpy 1 API np.clip(array) used a_max, a_min arguments and array.clip() used max and min as arguments
        # In numpy 2 they moved so that max and min worked on both, but still support a_max, and a_min on np.clip.
        # For now I'm only going to support the newer max/min everywhere notion, but I have to internally call
        # a_max, a_min so that yirgacheffe can work on older numpy installs.
        return LayerOperation(
            self,
            op.CLIP,
            window_op=WindowOperation.NONE,
            a_min=min,
            a_max=max,
        )
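The comment above is purely about argument naming: the stored kwargs are a_min/a_max because that spelling is accepted by np.clip on both NumPy 1.x and 2.x, whereas the min/max keywords on np.clip only arrived with NumPy 2. A small equivalence check in plain NumPy, independent of this module:

import numpy as np

data = np.array([-2.0, 0.5, 3.0])
print(np.clip(data, a_min=0.0, a_max=1.0))  # accepted by NumPy 1.x and 2.x
print(data.clip(min=0.0, max=1.0))          # ndarray.clip has always used min/max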

    def conv2d(self, weights):
        # A set of limitations that are just down to implementation time restrictions
        weights_width, weights_height = weights.shape
        if weights_width != weights_height:
            raise ValueError("Currently only square matrixes are supported for weights")
        padding = (weights_width - 1) / 2
        if padding != int(padding):
            raise ValueError("Currently weights dimensions must be odd")

        return LayerOperation(
            self,
            op.CONV2D,
            window_op=WindowOperation.NONE,
            buffer_padding=padding,
            weights=weights.astype(np.float32),
        )
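The padding arithmetic is why only odd, square kernels are accepted: an n-by-n kernel needs (n - 1) / 2 extra pixels of context on each side, which is only a whole pixel count when n is odd. A worked check (hypothetical weights):

import numpy as np

kernel = np.ones((3, 3)) / 9.0        # simple mean filter
padding = (kernel.shape[0] - 1) / 2   # 1.0 -> one pixel of context per side
assert padding == int(padding)
# A 4x4 kernel would give padding = 1.5 and be rejected by the check above.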

    def numpy_apply(self, func, other=None):
        return LayerOperation(self, func, other)

    def shader_apply(self, func, other=None):
        return ShaderStyleOperation(self, func, other)

    def save(self, destination_layer, and_sum=False, callback=None, band=1):
        return LayerOperation(self).save(destination_layer, and_sum, callback, band)

    def parallel_save(self, destination_layer, and_sum=False, callback=None, parallelism=None, band=1):
        return LayerOperation(self).parallel_save(destination_layer, and_sum, callback, parallelism, band)

    def parallel_sum(self, callback=None, parallelism=None, band=1):
        return LayerOperation(self).parallel_sum(callback, parallelism, band)

    def sum(self):
        return LayerOperation(self).sum()

    def min(self):
        return LayerOperation(self).min()

    def max(self):
        return LayerOperation(self).max()
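numpy_apply is the escape hatch for anything the built-in operators do not cover: the supplied function receives one chunk at a time as a backend array and should return an array of the same shape (or a scalar), and the result stays a lazy LayerOperation like everything else. A sketch using the hypothetical elevation layer from the earlier example:

import numpy as np

def squash(chunk):
    # Chunk-at-a-time NumPy code; it never sees the whole raster at once.
    return np.tanh(chunk / 100.0)

squashed = elevation.numpy_apply(squash)
print(squashed.sum())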


class LayerOperation(LayerMathMixin):

    @staticmethod
    def where(cond, a, b):
        return LayerOperation(
            cond,
            op.WHERE,
            rhs=a,
            other=b
        )

    @staticmethod
    def maximum(a, b):
        return LayerOperation(
            a,
            op.MAXIMUM,
            b,
            window_op=WindowOperation.UNION,
        )

    @staticmethod
    def minimum(a, b):
        return LayerOperation(
            a,
            op.MINIMUM,
            rhs=b,
            window_op=WindowOperation.UNION,
        )
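where, maximum and minimum mirror their NumPy namesakes but operate on layers, again returning lazy nodes. A sketch, reusing the hypothetical elevation layer from above:

from yirgacheffe.operators import LayerOperation

highlands = LayerOperation.where(elevation > 1000, elevation, 0)  # keep high ground, zero elsewhere
capped = LayerOperation.minimum(highlands, 4000)                  # clamp the retained values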

    def __init__(
        self,
        lhs,
        operator=None,
        rhs=None,
        other=None,
        window_op=WindowOperation.NONE,
        buffer_padding=0,
        **kwargs
    ):
        self.ystep = constants.YSTEP
        self.kwargs = kwargs
        self.window_op = window_op
        self.buffer_padding = buffer_padding

        if lhs is None:
            raise ValueError("LHS on operation should not be none")
        self.lhs = lhs

        self.operator = operator

        if rhs is not None:
            if backend.isscalar(rhs):
                self.rhs = LayerConstant(rhs)
            elif isinstance(rhs, (backend.array_t)):
                if rhs.shape == ():
                    self.rhs = LayerConstant(rhs.item())
                else:
                    raise ValueError("Numpy arrays are not allowed")
            else:
                if not are_pixel_scales_equal_enough([lhs.pixel_scale, rhs.pixel_scale]):
                    raise ValueError("Not all layers are at the same pixel scale")
                self.rhs = rhs
        else:
            self.rhs = None

        if other is not None:
            if backend.isscalar(other):
                self.other = LayerConstant(other)
            elif isinstance(other, (backend.array_t)):
                if other.shape == ():
                    self.other = LayerConstant(other.item())
                else:
                    raise ValueError("Numpy arrays are not allowed")
            else:
                if not are_pixel_scales_equal_enough([lhs.pixel_scale, other.pixel_scale]):
                    raise ValueError("Not all layers are at the same pixel scale")
                self.other = other
        else:
            self.other = None

    def __str__(self):
        try:
            return f"({self.lhs} {self.operator} {self.rhs})"
        except AttributeError:
            try:
                return f"({self.operator} {self.lhs})"
            except AttributeError:
                return str(self.lhs)

    def __len__(self):
        return len(self.lhs)

    def __getstate__(self) -> object:
        odict = self.__dict__.copy()
        if isinstance(self.operator, types.LambdaType):
            odict['operator_dill'] = dumps(self.operator)
            del odict['operator']
        return odict

    def __setstate__(self, state):
        if 'operator_dill' in state:
            state['operator'] = loads(state['operator_dill'])
            del state['operator_dill']
        self.__dict__.update(state)
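These two methods exist because parallel_save pickles the whole operation tree into worker processes, and a custom operator handed to numpy_apply is often a lambda, which the standard pickle module refuses to serialise; dill can. A standalone illustration of the difference (not using this module):

import pickle
import dill

double = lambda x: x * 2

try:
    pickle.dumps(double)
except Exception as exc:                    # pickle rejects lambdas
    print("pickle failed:", exc)

restored = dill.loads(dill.dumps(double))   # dill serialises the function body itself
print(restored(21))                         # 42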

    @property
    def area(self) -> Area:
        # The type().__name__ here is to avoid a circular import dependency
        lhs_area = self.lhs.area if not type(self.lhs).__name__ == "ConstantLayer" else None
        try:
            rhs_area = self.rhs.area if not type(self.rhs).__name__ == "ConstantLayer" else None
        except AttributeError:
            rhs_area = None
        try:
            other_area = self.other.area if not type(self.other).__name__ == "ConstantLayer" else None
        except AttributeError:
            other_area = None

        all_areas = []
        if lhs_area is not None:
            all_areas.append(lhs_area)
        if rhs_area is not None:
            all_areas.append(rhs_area)
        if other_area is not None:
            all_areas.append(other_area)

        match self.window_op:
            case WindowOperation.NONE:
                return all_areas[0]
            case WindowOperation.LEFT:
                return lhs_area
            case WindowOperation.RIGHT:
                assert rhs_area is not None
                return rhs_area
            case WindowOperation.INTERSECTION:
                intersection = Area(
                    left=max(x.left for x in all_areas),
                    top=min(x.top for x in all_areas),
                    right=min(x.right for x in all_areas),
                    bottom=max(x.bottom for x in all_areas)
                )
                if (intersection.left >= intersection.right) or (intersection.bottom >= intersection.top):
                    raise ValueError('No intersection possible')
                return intersection
            case WindowOperation.UNION:
                return Area(
                    left=min(x.left for x in all_areas),
                    top=max(x.top for x in all_areas),
                    right=max(x.right for x in all_areas),
                    bottom=min(x.bottom for x in all_areas)
                )
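Concretely, for two operands whose areas are (left, top, right, bottom) = (0, 10, 10, 0) and (5, 15, 15, 5), the union computed above spans (0, 15, 15, 0) and the intersection spans (5, 10, 10, 5); if the rectangles did not overlap, the intersection branch would raise. The same arithmetic by hand, with made-up coordinates:

from yirgacheffe.window import Area

a = Area(left=0, top=10, right=10, bottom=0)
b = Area(left=5, top=15, right=15, bottom=5)

union = Area(left=min(a.left, b.left), top=max(a.top, b.top),
             right=max(a.right, b.right), bottom=min(a.bottom, b.bottom))
intersection = Area(left=max(a.left, b.left), top=min(a.top, b.top),
                    right=min(a.right, b.right), bottom=max(a.bottom, b.bottom))
# union spans (0, 15, 15, 0); intersection spans (5, 10, 10, 5)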

    @property
    def pixel_scale(self) -> PixelScale:
        # Because we test at construction that pixel scales for RHS/other are roughly equal,
        # I believe this should be sufficient...
        try:
            pixel_scale = self.lhs.pixel_scale
        except AttributeError:
            pixel_scale = None

        if pixel_scale is None:
            return self.rhs.pixel_scale
        return pixel_scale

    @property
    def window(self) -> Window:
        pixel_scale = self.pixel_scale
        area = self.area

        return Window(
            xoff=round_down_pixels(area.left / pixel_scale.xstep, pixel_scale.xstep),
            yoff=round_down_pixels(area.top / (pixel_scale.ystep * -1.0), pixel_scale.ystep * -1.0),
            xsize=round_up_pixels(
                (area.right - area.left) / pixel_scale.xstep, pixel_scale.xstep
            ),
            ysize=round_up_pixels(
                (area.top - area.bottom) / (pixel_scale.ystep * -1.0),
                (pixel_scale.ystep * -1.0)
            ),
        )
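The window is just the area restated in pixel coordinates: offsets come from dividing the top-left corner by the pixel size (rounding down) and sizes from dividing the extent (rounding up), with the rounding helpers adding a small tolerance so floating-point noise cannot shift the grid by a pixel. As rough arithmetic (plain floor/ceil standing in for the tolerant helpers), an area of left=-10, right=10, top=10, bottom=-10 at a 0.5 degree pixel scale gives:

import math

xstep, ystep = 0.5, -0.5
left, top, right, bottom = -10.0, 10.0, 10.0, -10.0

xoff = math.floor(left / xstep)              # -20
yoff = math.floor(top / -ystep)              # 20
xsize = math.ceil((right - left) / xstep)    # 40
ysize = math.ceil((top - bottom) / -ystep)   # 40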

    @property
    def datatype(self):
        # TODO: Work out how to indicate type promotion via numpy
        return self.lhs.datatype

    @property
    def projection(self):
        try:
            return self.lhs.projection
        except AttributeError:
            return self.rhs.projection

    def _eval(self, area: Area, index: int, step: int, target_window: Optional[Window] = None):

        if self.buffer_padding:
            if target_window:
                target_window = target_window.grow(self.buffer_padding)
            pixel_scale = self.pixel_scale
            area = area.grow(self.buffer_padding * pixel_scale.xstep)
            # The index doesn't need updating because we updated area/window
            step += (2 * self.buffer_padding)

        lhs_data = self.lhs._eval(area, index, step, target_window)

        if self.operator is None:
            return lhs_data

        try:
            operator = backend.operator_map[self.operator]
        except KeyError:
            # Handles things like `numpy_apply` where a custom operator is provided
            operator = self.operator

        if self.other is not None:
            assert self.rhs is not None
            rhs_data = self.rhs._eval(area, index, step, target_window)
            other_data = self.other._eval(area, index, step, target_window)
            return operator(lhs_data, rhs_data, other_data, **self.kwargs)

        if self.rhs is not None:
            rhs_data = self.rhs._eval(area, index, step, target_window)
            return operator(lhs_data, rhs_data, **self.kwargs)

        return operator(lhs_data, **self.kwargs)

    def sum(self):
        # The result accumulator is float64, and for precision reasons
        # we force the sum to be done in float64 also. Otherwise we
        # see variable results depending on chunk size, as different parts
        # of the sum are done in different types.
        res = 0.0
        computation_window = self.window
        for yoffset in range(0, computation_window.ysize, self.ystep):
            step = self.ystep
            if yoffset + step > computation_window.ysize:
                step = computation_window.ysize - yoffset
            chunk = self._eval(self.area, yoffset, step, computation_window)
            res += backend.sum_op(chunk)
        return res
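The float64 accumulator matters because summing a large float32 chunk in float32 drops low-order bits, and how much is dropped depends on where the chunk boundaries fall, so a pure float32 pipeline would give totals that vary with ystep. A standalone demonstration of the effect:

import numpy as np

chunk = np.full((4000, 4000), 0.1, dtype=np.float32)

print(float(np.sum(chunk)))                    # accumulated in float32
print(float(np.sum(chunk, dtype=np.float64)))  # accumulated in float64; typically differs slightly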

    def min(self):
        res = None
        computation_window = self.window
        for yoffset in range(0, computation_window.ysize, self.ystep):
            step = self.ystep
            if yoffset + step > computation_window.ysize:
                step = computation_window.ysize - yoffset
            chunk = self._eval(self.area, yoffset, step, computation_window)
            chunk_min = backend.min_op(chunk)
            if (res is None) or (res > chunk_min):
                res = chunk_min
        return res

    def max(self):
        res = None
        computation_window = self.window
        for yoffset in range(0, computation_window.ysize, self.ystep):
            step = self.ystep
            if yoffset + step > computation_window.ysize:
                step = computation_window.ysize - yoffset
            chunk = self._eval(self.area, yoffset, step, computation_window)
            chunk_max = backend.max_op(chunk)
            if (res is None) or (chunk_max > res):
                res = chunk_max
        return res

    def save(self, destination_layer, and_sum=False, callback=None, band=1):
        """
        Calling save will write the output of the operation to the provided layer.
        If you pass and_sum as True it will additionally compute the sum and return that.
        """

        if destination_layer is None:
            raise ValueError("Layer is required")
        try:
            band = destination_layer._dataset.GetRasterBand(band)
        except AttributeError as exc:
            raise ValueError("Layer must be a raster backed layer") from exc

        computation_window = self.window
        destination_window = destination_layer.window

        if (computation_window.xsize != destination_window.xsize) \
                or (computation_window.ysize != destination_window.ysize):
            raise ValueError("Destination raster window size does not match input raster window size.")

        total = 0.0

        for yoffset in range(0, computation_window.ysize, self.ystep):
            if callback:
                callback(yoffset / computation_window.ysize)
            step = self.ystep
            if yoffset + step > computation_window.ysize:
                step = computation_window.ysize - yoffset
            chunk = self._eval(self.area, yoffset, step, computation_window)
            if isinstance(chunk, (float, int)):
                chunk = backend.full((step, destination_window.xsize), chunk)
            band.WriteArray(
                backend.demote_array(chunk),
                destination_window.xoff,
                yoffset + destination_window.yoff,
            )
            if and_sum:
                total += backend.sum_op(chunk)
        if callback:
            callback(1.0)

        return total if and_sum else None
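In practice save is the usual terminal operation: create a destination raster whose window matches the computation, then stream the result into it. A usage sketch (empty_raster_layer_like and the file names are assumptions drawn from the wider yirgacheffe API, not defined in this file, and both inputs are assumed to share the same extent):

from yirgacheffe.layers import RasterLayer

habitat = RasterLayer.layer_from_file("habitat.tif")       # hypothetical input
elevation = RasterLayer.layer_from_file("elevation.tif")   # hypothetical input

calc = habitat * (elevation > 1000)
result = RasterLayer.empty_raster_layer_like(habitat, filename="highland_habitat.tif")
total = calc.save(result, and_sum=True)   # writes the raster and also returns the sum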

    def _parallel_worker(self, index, shared_mem, sem, np_dtype, width, input_queue, output_queue):
        arr = np.ndarray((self.ystep, width), dtype=np_dtype, buffer=shared_mem.buf)

        try:
            while True:
                # We acquire the lock so we know we have somewhere to put the
                # result before we take work. This is because in practice
                # it seems the writing to GeoTIFF is the bottleneck, and
                # we had workers taking a task, then waiting for somewhere to
                # write to for ages when other workers were exiting because there
                # was nothing to do.
                sem.acquire()

                task = input_queue.get()
                if task is None:
                    sem.release()
                    output_queue.put(None)
                    break
                yoffset, step = task

                result = self._eval(self.area, yoffset, step)
                backend.eval_op(result)

                arr[:step] = backend.demote_array(result)

                output_queue.put((index, yoffset, step))

        except Exception as e:  # pylint: disable=W0718
            logger.exception(e)
            sem.release()
            output_queue.put(None)

    def _park(self):
        try:
            self.lhs._park()
        except AttributeError:
            pass
        try:
            self.rhs._park()
        except AttributeError:
            pass
        try:
            self.other._park()
        except AttributeError:
            pass

    def _parallel_save(self, destination_layer, and_sum=False, callback=None, parallelism=None, band=1):
        assert (destination_layer is not None) or and_sum
        computation_window = self.window
        if destination_layer is not None:
            try:
                band = destination_layer._dataset.GetRasterBand(band)
            except AttributeError as exc:
                raise ValueError("Layer must be a raster backed layer") from exc

            destination_window = destination_layer.window

            if (computation_window.xsize != destination_window.xsize) \
                    or (computation_window.ysize != destination_window.ysize):
                raise ValueError("Destination raster window size does not match input raster window size.")

            np_dtype = {
                gdal.GDT_Byte: np.dtype('byte'),
                gdal.GDT_Float32: np.dtype('float32'),
                gdal.GDT_Float64: np.dtype('float64'),
                gdal.GDT_Int8: np.dtype('int8'),
                gdal.GDT_Int16: np.dtype('int16'),
                gdal.GDT_Int32: np.dtype('int32'),
                gdal.GDT_Int64: np.dtype('int64'),
                gdal.GDT_UInt16: np.dtype('uint16'),
                gdal.GDT_UInt32: np.dtype('uint32'),
                gdal.GDT_UInt64: np.dtype('uint64'),
            }[band.DataType]
        else:
            band = None
            np_dtype = np.dtype('float64')

        # The parallel save will cause a fork on Linux, so we need to
        # remove all SWIG references
        self._park()

        total = 0.0

        with multiprocessing.Manager() as manager:
            with SharedMemoryManager() as smm:

                worker_count = parallelism or multiprocessing.cpu_count()
                work_blocks = len(range(0, computation_window.ysize, self.ystep))
                worker_count = min(work_blocks, worker_count)

                mem_sem_cast = []
                for i in range(worker_count):
                    shared_buf = smm.SharedMemory(size=np_dtype.itemsize * self.ystep * computation_window.xsize)
                    cast_buf = np.ndarray((self.ystep, computation_window.xsize), dtype=np_dtype, buffer=shared_buf.buf)
                    cast_buf[:] = np.zeros((self.ystep, computation_window.xsize), np_dtype)
                    mem_sem_cast.append((shared_buf, Semaphore(), cast_buf))

                source_queue = manager.Queue()
                result_queue = manager.Queue()

                for yoffset in range(0, computation_window.ysize, self.ystep):
                    step = ((computation_window.ysize - yoffset)
                        if yoffset + self.ystep > computation_window.ysize
                        else self.ystep)
                    source_queue.put((
                        yoffset,
                        step
                    ))
                for _ in range(worker_count):
                    source_queue.put(None)

                if callback:
                    callback(0.0)

                workers = [Process(target=self._parallel_worker, args=(
                    i,
                    mem_sem_cast[i][0],
                    mem_sem_cast[i][1],
                    np_dtype,
                    computation_window.xsize,
                    source_queue,
                    result_queue
                )) for i in range(worker_count)]
                for worker in workers:
                    worker.start()

                sentinel_count = len(workers)
                retired_blocks = 0
                while sentinel_count > 0:
                    res = result_queue.get()
                    if res is None:
                        sentinel_count -= 1
                        continue
                    index, yoffset, step = res
                    _, sem, arr = mem_sem_cast[index]
                    if band:
                        band.WriteArray(
                            arr[0:step],
                            destination_window.xoff,
                            yoffset + destination_window.yoff,
                        )
                    if and_sum:
                        total += np.sum(np.array(arr[0:step]).astype(np.float64))
                    sem.release()
                    retired_blocks += 1
                    if callback:
                        callback(retired_blocks / work_blocks)

                processes = workers
                while processes:
                    candidates = [x for x in processes if not x.is_alive()]
                    for candidate in candidates:
                        candidate.join()
                        if candidate.exitcode:
                            for victim in processes:
                                victim.kill()
                            sys.exit(candidate.exitcode)
                        processes.remove(candidate)
                    time.sleep(0.01)

        return total if and_sum else None
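The pool streams results back through per-worker shared-memory blocks guarded by semaphores, the parent process does all the GDAL writes, and _park drops the SWIG-held GDAL handles before the fork so the children never share the parent's dataset objects. From the caller's side it looks like save with an optional degree of parallelism (sketch, reusing the hypothetical calc and result from the earlier example):

calc.parallel_save(result, parallelism=4)   # 4 worker processes; defaults to cpu_count()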

    def parallel_save(self, destination_layer, and_sum=False, callback=None, parallelism=None, band=1):
        if destination_layer is None:
            raise ValueError("Layer is required")
        return self._parallel_save(destination_layer, and_sum, callback, parallelism, band)

    def parallel_sum(self, callback=None, parallelism=None, band=1):
        return self._parallel_save(None, True, callback, parallelism, band)

class ShaderStyleOperation(LayerOperation):

    def _eval(self, area, index, step, target_window=None):
        if target_window is None:
            target_window = self.window
        lhs_data = self.lhs._eval(area, index, step, target_window)
        if self.rhs is not None:
            rhs_data = self.rhs._eval(area, index, step, target_window)
        else:
            rhs_data = None

        # Constant results make this a bit messier. Might in future
        # be nicer to promote them to arrays sooner?
        if isinstance(lhs_data, (int, float)):
            if rhs_data is None:
                return self.operator(lhs_data, **self.kwargs)
            if isinstance(rhs_data, (int, float)):
                return self.operator(lhs_data, rhs_data, **self.kwargs)
            else:
                result = np.empty_like(rhs_data)
        else:
            result = np.empty_like(lhs_data)

        window = self.window
        for yoffset in range(step):
            for xoffset in range(window.xsize):
                try:
                    lhs_val = lhs_data[yoffset][xoffset]
                except TypeError:
                    lhs_val = lhs_data
                if rhs_data is not None:
                    try:
                        rhs_val = rhs_data[yoffset][xoffset]
                    except TypeError:
                        rhs_val = rhs_data
                    result[yoffset][xoffset] = self.operator(lhs_val, rhs_val, **self.kwargs)
                else:
                    result[yoffset][xoffset] = self.operator(lhs_val, **self.kwargs)

        return result
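ShaderStyleOperation trades speed for simplicity: rather than handing the function a whole chunk, it calls it once per pixel, which suits "shader" style functions written against scalars. A sketch with a hypothetical per-pixel function, reusing the earlier layers:

def threshold(value):
    # Scalar in, scalar out: called once for every pixel in the chunk.
    return 1 if value > 1000 else 0

shaded = elevation.shader_apply(threshold)
shaded.save(result)   # evaluated pixel by pixel, so far slower than numpy_apply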

# We provide these module level accessors as it's often nicer to write `log(x/y)` rather than `(x/y).log()`
where = LayerOperation.where
minimum = LayerOperation.minimum
maximum = LayerOperation.maximum
clip = LayerOperation.clip
log = LayerOperation.log
log2 = LayerOperation.log2
log10 = LayerOperation.log10
exp = LayerOperation.exp
exp2 = LayerOperation.exp2
nan_to_num = LayerOperation.nan_to_num
isin = LayerOperation.isin
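So the same expression can be spelled either way; the functional forms simply forward to the methods above. A final sketch with hypothetical layers a and b:

from yirgacheffe.operators import log, where

ratio_method = (a / b).log()        # method form
ratio_func = log(a / b)             # module-level form, builds the same LayerOperation tree
safe = where(b > 0, ratio_func, 0)  # guard against division artefacts before saving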