yirgacheffe-1.7.6-py3-none-any.whl → yirgacheffe-1.7.7-py3-none-any.whl
- yirgacheffe/_core.py +1 -1
- yirgacheffe/_operators.py +982 -0
- yirgacheffe/layers/base.py +2 -1
- yirgacheffe/layers/constant.py +1 -2
- yirgacheffe/layers/group.py +1 -2
- yirgacheffe/layers/h3layer.py +1 -1
- yirgacheffe/layers/rasters.py +1 -1
- yirgacheffe/layers/rescaled.py +1 -1
- yirgacheffe/layers/vectors.py +1 -1
- yirgacheffe/operators.py +6 -970
- yirgacheffe/window.py +2 -2
- {yirgacheffe-1.7.6.dist-info → yirgacheffe-1.7.7.dist-info}/METADATA +1 -1
- yirgacheffe-1.7.7.dist-info/RECORD +26 -0
- yirgacheffe-1.7.6.dist-info/RECORD +0 -25
- {yirgacheffe-1.7.6.dist-info → yirgacheffe-1.7.7.dist-info}/WHEEL +0 -0
- {yirgacheffe-1.7.6.dist-info → yirgacheffe-1.7.7.dist-info}/entry_points.txt +0 -0
- {yirgacheffe-1.7.6.dist-info → yirgacheffe-1.7.7.dist-info}/licenses/LICENSE +0 -0
- {yirgacheffe-1.7.6.dist-info → yirgacheffe-1.7.7.dist-info}/top_level.txt +0 -0
yirgacheffe/operators.py
CHANGED
@@ -1,971 +1,7 @@
-import logging
-import math
-import multiprocessing
-import os
-import sys
-import tempfile
-import time
-import types
-from enum import Enum
-from multiprocessing import Semaphore, Process
-from multiprocessing.managers import SharedMemoryManager
-from pathlib import Path
-from typing import Callable, Dict, Optional, Union
+# Eventually all this should be moved to the top level in 2.0, but for backwards compatibility in 1.x needs
+# to remain here
 
-import deprecation
-import numpy as np
-import numpy.typing as npt
-from osgeo import gdal
-from dill import dumps, loads # type: ignore
-
-from . import constants, __version__
-from .rounding import round_up_pixels, round_down_pixels
-from .window import Area, PixelScale, MapProjection, Window
-from ._backends import backend
-from ._backends.enumeration import operators as op
-from ._backends.enumeration import dtype as DataType
-
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.WARNING)
-
-class WindowOperation(Enum):
-    NONE = 1
-    UNION = 2
-    INTERSECTION = 3
-    LEFT = 4
-    RIGHT = 5
-
-class LayerConstant:
-    def __init__(self, val):
-        self.val = val
-
-    def __str__(self) -> str:
-        return str(self.val)
-
-    def _eval(self, _area, _projection, _index, _step, _target_window):
-        return self.val
-
-    @property
-    def area(self) -> Area:
-        return Area.world()
-
-    def _get_operation_area(self, _projection) -> Area:
-        return Area.world()
-
-class LayerMathMixin:
-
-    def __add__(self, other):
-        return LayerOperation(self, op.ADD, other, window_op=WindowOperation.UNION)
-
-    def __sub__(self, other):
-        return LayerOperation(self, op.SUB, other, window_op=WindowOperation.UNION)
-
-    def __mul__(self, other):
-        return LayerOperation(self, op.MUL, other, window_op=WindowOperation.INTERSECTION)
-
-    def __truediv__(self, other):
-        return LayerOperation(self, op.TRUEDIV, other, window_op=WindowOperation.INTERSECTION)
-
-    def __floordiv__(self, other):
-        return LayerOperation(self, op.FLOORDIV, other, window_op=WindowOperation.INTERSECTION)
-
-    def __mod__(self, other):
-        return LayerOperation(self, op.REMAINDER, other, window_op=WindowOperation.INTERSECTION)
-
-    def __pow__(self, other):
-        return LayerOperation(self, op.POW, other, window_op=WindowOperation.UNION)
-
-    def __eq__(self, other):
-        return LayerOperation(self, op.EQ, other, window_op=WindowOperation.INTERSECTION)
-
-    def __ne__(self, other):
-        return LayerOperation(self, op.NE, other, window_op=WindowOperation.UNION)
-
-    def __lt__(self, other):
-        return LayerOperation(self, op.LT, other, window_op=WindowOperation.UNION)
-
-    def __le__(self, other):
-        return LayerOperation(self, op.LE, other, window_op=WindowOperation.UNION)
-
-    def __gt__(self, other):
-        return LayerOperation(self, op.GT, other, window_op=WindowOperation.UNION)
-
-    def __ge__(self, other):
-        return LayerOperation(self, op.GE, other, window_op=WindowOperation.UNION)
-
-    def __and__(self, other):
-        return LayerOperation(self, op.AND, other, window_op=WindowOperation.INTERSECTION)
-
-    def __or__(self, other):
-        return LayerOperation(self, op.OR, other, window_op=WindowOperation.UNION)
-
-    def _eval(
-        self,
-        area,
-        projection,
-        index,
-        step,
-        target_window=None
-    ):
-        try:
-            window = self.window if target_window is None else target_window
-            return self._read_array_for_area(area, projection, 0, index, window.xsize, step)
-        except AttributeError:
-            return self._read_array_for_area(
-                area,
-                projection,
-                0,
-                index,
-                target_window.xsize if target_window else 1,
-                step
-            )
-
-    def nan_to_num(self, nan=0, posinf=None, neginf=None):
-        return LayerOperation(
-            self,
-            op.NAN_TO_NUM,
-            window_op=WindowOperation.NONE,
-            copy=False,
-            nan=nan,
-            posinf=posinf,
-            neginf=neginf,
-        )
-
-    def isin(self, test_elements):
-        return LayerOperation(
-            self,
-            op.ISIN,
-            window_op=WindowOperation.NONE,
-            test_elements=test_elements,
-        )
-
-    def isnan(self):
-        return LayerOperation(
-            self,
-            op.ISNAN,
-            window_op=WindowOperation.NONE,
-        )
-
-    def abs(self):
-        return LayerOperation(
-            self,
-            op.ABS,
-            window_op=WindowOperation.NONE,
-        )
-
-    def floor(self):
-        return LayerOperation(
-            self,
-            op.FLOOR,
-            window_op=WindowOperation.NONE,
-        )
-
-    def round(self):
-        return LayerOperation(
-            self,
-            op.ROUND,
-            window_op=WindowOperation.NONE,
-        )
-
-    def ceil(self):
-        return LayerOperation(
-            self,
-            op.CEIL,
-            window_op=WindowOperation.NONE,
-        )
-
-    def log(self):
-        return LayerOperation(
-            self,
-            op.LOG,
-            window_op=WindowOperation.NONE,
-        )
-
-    def log2(self):
-        return LayerOperation(
-            self,
-            op.LOG2,
-            window_op=WindowOperation.NONE,
-        )
-
-    def log10(self):
-        return LayerOperation(
-            self,
-            op.LOG10,
-            window_op=WindowOperation.NONE,
-        )
-
-    def exp(self):
-        return LayerOperation(
-            self,
-            op.EXP,
-            window_op=WindowOperation.NONE,
-        )
-
-    def exp2(self):
-        return LayerOperation(
-            self,
-            op.EXP2,
-            window_op=WindowOperation.NONE,
-        )
-
-    def clip(self, min=None, max=None): # pylint: disable=W0622
-        # In the numpy 1 API np.clip(array) used a_max, a_min arguments and array.clip() used max and min as arguments
-        # In numpy 2 they moved so that max and min worked on both, but still support a_max, and a_min on np.clip.
-        # For now I'm only going to support the newer max/min everywhere notion, but I have to internally call
-        # a_max, a_min so that yirgacheffe can work on older numpy installs.
-        return LayerOperation(
-            self,
-            op.CLIP,
-            window_op=WindowOperation.NONE,
-            a_min=min,
-            a_max=max,
-        )
-
-    def conv2d(self, weights):
-        # A set of limitations that are just down to implementation time restrictions
-        weights_width, weights_height = weights.shape
-        if weights_width != weights_height:
-            raise ValueError("Currently only square matrixes are supported for weights")
-        padding = (weights_width - 1) / 2
-        if padding != int(padding):
-            raise ValueError("Currently weights dimensions must be odd")
-
-        return LayerOperation(
-            self,
-            op.CONV2D,
-            window_op=WindowOperation.NONE,
-            buffer_padding=padding,
-            weights=weights.astype(np.float32),
-        )
-
-    def numpy_apply(self, func, other=None):
-        return LayerOperation(self, func, other)
-
-    def shader_apply(self, func, other=None):
-        return ShaderStyleOperation(self, func, other)
-
-    def save(self, destination_layer, and_sum=False, callback=None, band=1):
-        return LayerOperation(self).save(destination_layer, and_sum, callback, band)
-
-    def parallel_save(self, destination_layer, and_sum=False, callback=None, parallelism=None, band=1):
-        return LayerOperation(self).parallel_save(destination_layer, and_sum, callback, parallelism, band)
-
-    def parallel_sum(self, callback=None, parallelism=None, band=1):
-        return LayerOperation(self).parallel_sum(callback, parallelism, band)
-
-    def to_geotiff(
-        self,
-        filename: Union[Path,str],
-        and_sum: bool = False,
-        parallelism:Optional[Union[int,bool]]=None
-    ) -> Optional[float]:
-        return LayerOperation(self).to_geotiff(filename, and_sum, parallelism)
-
-    def sum(self):
-        return LayerOperation(self).sum()
-
-    def min(self):
-        return LayerOperation(self).min()
-
-    def max(self):
-        return LayerOperation(self).max()
-
-    def astype(self, datatype):
-        return LayerOperation(
-            self,
-            op.ASTYPE,
-            window_op=WindowOperation.NONE,
-            datatype=datatype
-        )
-
-
-class LayerOperation(LayerMathMixin):
-
-    @staticmethod
-    def where(cond, a, b):
-        return LayerOperation(
-            cond,
-            op.WHERE,
-            rhs=a,
-            other=b
-        )
-
-    @staticmethod
-    def maximum(a, b):
-        return LayerOperation(
-            a,
-            op.MAXIMUM,
-            b,
-            window_op=WindowOperation.UNION,
-        )
-
-    @staticmethod
-    def minimum(a, b):
-        return LayerOperation(
-            a,
-            op.MINIMUM,
-            rhs=b,
-            window_op=WindowOperation.UNION,
-        )
-
-    def __init__(
-        self,
-        lhs,
-        operator=None,
-        rhs=None,
-        other=None,
-        window_op=WindowOperation.NONE,
-        buffer_padding=0,
-        **kwargs
-    ):
-        self.ystep = constants.YSTEP
-        self.kwargs = kwargs
-        self.window_op = window_op
-        self.buffer_padding = buffer_padding
-
-        if lhs is None:
-            raise ValueError("LHS on operation should not be none")
-        self.lhs = lhs
-
-        self.operator = operator
-
-        if rhs is not None:
-            if backend.isscalar(rhs):
-                self.rhs = LayerConstant(rhs)
-            elif isinstance(rhs, (backend.array_t)):
-                if rhs.shape == ():
-                    self.rhs = LayerConstant(rhs.item())
-                else:
-                    raise ValueError("Numpy arrays are no allowed")
-            else:
-                if not lhs.map_projection == rhs.map_projection:
-                    raise ValueError("Not all layers are at the same pixel scale")
-                self.rhs = rhs
-        else:
-            self.rhs = None
-
-        if other is not None:
-            if backend.isscalar(other):
-                self.other = LayerConstant(other)
-            elif isinstance(other, (backend.array_t)):
-                if other.shape == ():
-                    self.rhs = LayerConstant(other.item())
-                else:
-                    raise ValueError("Numpy arrays are no allowed")
-            else:
-                if not lhs.map_projection == other.map_projection:
-                    raise ValueError("Not all layers are at the same pixel scale")
-                self.other = other
-        else:
-            self.other = None
-
-    def __str__(self) -> str:
-        try:
-            return f"({self.lhs} {self.operator} {self.rhs})"
-        except AttributeError:
-            try:
-                return f"({self.operator} {self.lhs})"
-            except AttributeError:
-                return str(self.lhs)
-
-    def __len__(self) -> int:
-        return len(self.lhs)
-
-    def __getstate__(self) -> object:
-        odict = self.__dict__.copy()
-        if isinstance(self.operator, types.LambdaType):
-            odict['operator_dill'] = dumps(self.operator)
-            del odict['operator']
-        return odict
-
-    def __setstate__(self, state) -> None:
-        if 'operator_dill' in state:
-            state['operator'] = loads(state['operator_dill'])
-            del state['operator_dill']
-        self.__dict__.update(state)
-
-    @property
-    def area(self) -> Area:
-        return self._get_operation_area(self.map_projection)
-
-    def _get_operation_area(self, projection: Optional[MapProjection]) -> Area:
-        lhs_area = self.lhs._get_operation_area(projection)
-        try:
-            rhs_area = self.rhs._get_operation_area(projection)
-        except AttributeError:
-            rhs_area = None
-        try:
-            other_area = self.other._get_operation_area(projection)
-        except AttributeError:
-            other_area = None
-
-        all_areas = [x for x in [lhs_area, rhs_area, other_area] if (x is not None) and (not x.is_world)]
-
-        match self.window_op:
-            case WindowOperation.NONE:
-                return all_areas[0]
-            case WindowOperation.LEFT:
-                return lhs_area
-            case WindowOperation.RIGHT:
-                assert rhs_area is not None
-                return rhs_area
-            case WindowOperation.INTERSECTION:
-                intersection = Area(
-                    left=max(x.left for x in all_areas),
-                    top=min(x.top for x in all_areas),
-                    right=min(x.right for x in all_areas),
-                    bottom=max(x.bottom for x in all_areas)
-                )
-                if (intersection.left >= intersection.right) or (intersection.bottom >= intersection.top):
-                    raise ValueError('No intersection possible')
-                return intersection
-            case WindowOperation.UNION:
-                union = Area(
-                    left=min(x.left for x in all_areas),
-                    top=max(x.top for x in all_areas),
-                    right=max(x.right for x in all_areas),
-                    bottom=min(x.bottom for x in all_areas)
-                )
-                return union
-            case _:
-                assert False, "Should not be reached"
-
-    @property
-    @deprecation.deprecated(
-        deprecated_in="1.7",
-        removed_in="2.0",
-        current_version=__version__,
-        details="Use `map_projection` instead."
-    )
-    def pixel_scale(self) -> PixelScale:
-        # Because we test at construction that pixel scales for RHS/other are roughly equal,
-        # I believe this should be sufficient...
-        try:
-            pixel_scale = self.lhs.pixel_scale
-        except AttributeError:
-            pixel_scale = None
-
-        if pixel_scale is None:
-            return self.rhs.pixel_scale
-        return pixel_scale
-
-    @property
-    def window(self) -> Window:
-        projection = self.map_projection
-        if projection is None:
-            # This can happen if your source layers are say just constants
-            raise AttributeError("No window without projection")
-        area = self._get_operation_area(projection)
-        assert area is not None
-
-        return Window(
-            xoff=round_down_pixels(area.left / projection.xstep, projection.xstep),
-            yoff=round_down_pixels(area.top / (projection.ystep * -1.0), projection.ystep * -1.0),
-            xsize=round_up_pixels(
-                (area.right - area.left) / projection.xstep, projection.xstep
-            ),
-            ysize=round_up_pixels(
-                (area.top - area.bottom) / (projection.ystep * -1.0),
-                (projection.ystep * -1.0)
-            ),
-        )
-
-    @property
-    def datatype(self) -> DataType:
-        # TODO: Work out how to indicate type promotion via numpy
-        return self.lhs.datatype
-
-    @property
-    @deprecation.deprecated(
-        deprecated_in="1.7",
-        removed_in="2.0",
-        current_version=__version__,
-        details="Use `map_projection` instead."
-    )
-    def projection(self):
-        try:
-            projection = self.lhs.projection
-        except AttributeError:
-            projection = None
-
-        if projection is None:
-            projection = self.rhs.projection
-        return projection
-
-    @property
-    def map_projection(self) -> Optional[MapProjection]:
-        try:
-            projection = self.lhs.map_projection
-        except AttributeError:
-            projection = None
-
-        if projection is None:
-            try:
-                projection = self.rhs.map_projection
-            except AttributeError:
-                pass
-        return projection
-
-    def _eval(
-        self,
-        area: Area,
-        projection: MapProjection,
-        index: int,
-        step: int,
-        target_window:Optional[Window]=None
-    ):
-
-        if self.buffer_padding:
-            if target_window:
-                target_window = target_window.grow(self.buffer_padding)
-            area = area.grow(self.buffer_padding * projection.xstep)
-            # The index doesn't need updating because we updated area/window
-            step += (2 * self.buffer_padding)
-
-        lhs_data = self.lhs._eval(area, projection, index, step, target_window)
-
-        if self.operator is None:
-            return lhs_data
-
-        try:
-            operator: Callable = backend.operator_map[self.operator]
-        except KeyError:
-            # Handles things like `numpy_apply` where a custom operator is provided
-            operator = self.operator
-
-        if self.other is not None:
-            assert self.rhs is not None
-            rhs_data = self.rhs._eval(area, projection, index, step, target_window)
-            other_data = self.other._eval(area, projection, index, step, target_window)
-            return operator(lhs_data, rhs_data, other_data, **self.kwargs)
-
-        if self.rhs is not None:
-            rhs_data = self.rhs._eval(area, projection, index, step, target_window)
-            return operator(lhs_data, rhs_data, **self.kwargs)
-
-        return operator(lhs_data, **self.kwargs)
-
-    def sum(self):
-        # The result accumulator is float64, and for precision reasons
-        # we force the sum to be done in float64 also. Otherwise we
-        # see variable results depending on chunk size, as different parts
-        # of the sum are done in different types.
-        res = 0.0
-        computation_window = self.window
-        projection = self.map_projection
-        for yoffset in range(0, computation_window.ysize, self.ystep):
-            step=self.ystep
-            if yoffset+step > computation_window.ysize:
-                step = computation_window.ysize - yoffset
-            chunk = self._eval(self._get_operation_area(projection), projection, yoffset, step, computation_window)
-            res += backend.sum_op(chunk)
-        return res
-
-    def min(self):
-        res = None
-        computation_window = self.window
-        projection = self.map_projection
-        for yoffset in range(0, computation_window.ysize, self.ystep):
-            step=self.ystep
-            if yoffset+step > computation_window.ysize:
-                step = computation_window.ysize - yoffset
-            chunk = self._eval(self._get_operation_area(projection), projection, yoffset, step, computation_window)
-            chunk_min = backend.min_op(chunk)
-            if (res is None) or (res > chunk_min):
-                res = chunk_min
-        return res
-
-    def max(self):
-        res = None
-        computation_window = self.window
-        projection = self.map_projection
-        for yoffset in range(0, computation_window.ysize, self.ystep):
-            step=self.ystep
-            if yoffset+step > computation_window.ysize:
-                step = computation_window.ysize - yoffset
-            chunk = self._eval(self._get_operation_area(projection), projection, yoffset, step, computation_window)
-            chunk_max = backend.max_op(chunk)
-            if (res is None) or (chunk_max > res):
-                res = chunk_max
-        return res
-
-    def save(self, destination_layer, and_sum=False, callback=None, band=1) -> Optional[float]:
-        """
-        Calling save will write the output of the operation to the provied layer.
-        If you provide sum as true it will additionall compute the sum and return that.
-        """
-
-        if destination_layer is None:
-            raise ValueError("Layer is required")
-        try:
-            band = destination_layer._dataset.GetRasterBand(band)
-        except AttributeError as exc:
-            raise ValueError("Layer must be a raster backed layer") from exc
-
-        projection = self.map_projection
-
-        destination_window = destination_layer.window
-        destination_projection = destination_layer.map_projection
-        assert destination_projection is not None
-
-        if projection is None:
-            projection = destination_projection
-        else:
-            if projection != destination_projection:
-                raise ValueError("Destination layer and input layers have different projection/scale")
-
-        # If we're calculating purely from a constant layer, then we don't have a window or area
-        # so we should use the destination raster details.
-        try:
-            computation_window = self.window
-            computation_area = self._get_operation_area(projection)
-        except (AttributeError, IndexError):
-            computation_window = destination_window
-            computation_area = destination_layer.area
-
-        if (computation_window.xsize != destination_window.xsize) \
-                or (computation_window.ysize != destination_window.ysize):
-            raise ValueError((f"Destination raster window size does not match input raster window size: "
-                f"{(destination_window.xsize, destination_window.ysize)} vs "
-                f"{(computation_window.xsize, computation_window.ysize)}"))
-
-        total = 0.0
-
-        for yoffset in range(0, computation_window.ysize, self.ystep):
-            if callback:
-                callback(yoffset / computation_window.ysize)
-            step=self.ystep
-            if yoffset+step > computation_window.ysize:
-                step = computation_window.ysize - yoffset
-            chunk = self._eval(computation_area, projection, yoffset, step, computation_window)
-            if isinstance(chunk, (float, int)):
-                chunk = backend.full((step, destination_window.xsize), chunk)
-            band.WriteArray(
-                backend.demote_array(chunk),
-                destination_window.xoff,
-                yoffset + destination_window.yoff,
-            )
-            if and_sum:
-                total += backend.sum_op(chunk)
-        if callback:
-            callback(1.0)
-
-        return total if and_sum else None
-
-    def _parallel_worker(self, index, shared_mem, sem, np_dtype, width, input_queue, output_queue, computation_window):
-        arr = np.ndarray((self.ystep, width), dtype=np_dtype, buffer=shared_mem.buf)
-        projection = self.map_projection
-        try:
-            while True:
-                # We acquire the lock so we know we have somewhere to put the
-                # result before we take work. This is because in practice
-                # it seems the writing to GeoTIFF is the bottleneck, and
-                # we had workers taking a task, then waiting for somewhere to
-                # write to for ages when other workers were exiting because there
-                # was nothing to do.
-                sem.acquire()
-
-                task = input_queue.get()
-                if task is None:
-                    sem.release()
-                    output_queue.put(None)
-                    break
-                yoffset, step = task
-
-                result = self._eval(self._get_operation_area(projection), projection, yoffset, step, computation_window)
-                backend.eval_op(result)
-
-                arr[:step] = backend.demote_array(result)
-
-                output_queue.put((index, yoffset, step))
-
-        except Exception as e: # pylint: disable=W0718
-            logger.exception(e)
-            sem.release()
-            output_queue.put(None)
-
-    def _park(self):
-        try:
-            self.lhs._park()
-        except AttributeError:
-            pass
-        try:
-            self.rhs._park()
-        except AttributeError:
-            pass
-        try:
-            self.other._park()
-        except AttributeError:
-            pass
-
-    def _parallel_save(
-        self,
-        destination_layer,
-        and_sum=False,
-        callback=None,
-        parallelism=None,
-        band=1
-    ) -> Optional[float]:
-        assert (destination_layer is not None) or and_sum
-        try:
-            computation_window = self.window
-        except (AttributeError, IndexError):
-            # This is most likely because the calculation is on a constant layer (or combination of only constant
-            # layers) and there's no real benefit to parallel saving then, so to keep this code from getting yet
-            # more complicated just fall back to the single threaded path
-            if destination_layer:
-                return self.save(destination_layer, and_sum, callback, band)
-            elif and_sum:
-                return self.sum()
-            else:
-                assert False
-
-        worker_count = parallelism or multiprocessing.cpu_count()
-        work_blocks = len(range(0, computation_window.ysize, self.ystep))
-        adjusted_blocks = math.ceil(work_blocks / constants.MINIMUM_CHUNKS_PER_THREAD)
-        worker_count = min(adjusted_blocks, worker_count)
-
-        if worker_count == 1:
-            if destination_layer:
-                return self.save(destination_layer, and_sum, callback, band)
-            elif and_sum:
-                return self.sum()
-            else:
-                assert False
-
-        if destination_layer is not None:
-            try:
-                band = destination_layer._dataset.GetRasterBand(band)
-            except AttributeError as exc:
-                raise ValueError("Layer must be a raster backed layer") from exc
-
-            destination_window = destination_layer.window
-
-            if (computation_window.xsize != destination_window.xsize) \
-                or (computation_window.ysize != destination_window.ysize):
-                raise ValueError("Destination raster window size does not match input raster window size.")
-
-            np_type_map : Dict[int, np.dtype] = {
-                gdal.GDT_Byte: np.dtype('byte'),
-                gdal.GDT_Float32: np.dtype('float32'),
-                gdal.GDT_Float64: np.dtype('float64'),
-                gdal.GDT_Int8: np.dtype('int8'),
-                gdal.GDT_Int16: np.dtype('int16'),
-                gdal.GDT_Int32: np.dtype('int32'),
-                gdal.GDT_Int64: np.dtype('int64'),
-                gdal.GDT_UInt16: np.dtype('uint16'),
-                gdal.GDT_UInt32: np.dtype('uint32'),
-                gdal.GDT_UInt64: np.dtype('uint64'),
-            }
-            np_dtype = np_type_map[band.DataType]
-        else:
-            band = None
-            np_dtype = np.dtype('float64')
-
-        # The parallel save will cause a fork on linux, so we need to
-        # remove all SWIG references
-        self._park()
-
-        total = 0.0
-
-        with multiprocessing.Manager() as manager:
-            with SharedMemoryManager() as smm:
-
-                mem_sem_cast = []
-                for _ in range(worker_count):
-                    shared_buf = smm.SharedMemory(size=np_dtype.itemsize * self.ystep * computation_window.xsize)
-                    cast_buf : npt.NDArray = np.ndarray(
-                        (self.ystep, computation_window.xsize),
-                        dtype=np_dtype,
-                        buffer=shared_buf.buf
-                    )
-                    cast_buf[:] = np.zeros((self.ystep, computation_window.xsize), np_dtype)
-                    mem_sem_cast.append((shared_buf, Semaphore(), cast_buf))
-
-                source_queue = manager.Queue()
-                result_queue = manager.Queue()
-
-                for yoffset in range(0, computation_window.ysize, self.ystep):
-                    step = ((computation_window.ysize - yoffset)
-                        if yoffset+self.ystep > computation_window.ysize
-                        else self.ystep)
-                    source_queue.put((
-                        yoffset,
-                        step
-                    ))
-                for _ in range(worker_count):
-                    source_queue.put(None)
-
-                if callback:
-                    callback(0.0)
-
-                workers = [Process(target=self._parallel_worker, args=(
-                    i,
-                    mem_sem_cast[i][0],
-                    mem_sem_cast[i][1],
-                    np_dtype,
-                    computation_window.xsize,
-                    source_queue,
-                    result_queue,
-                    computation_window
-                )) for i in range(worker_count)]
-                for worker in workers:
-                    worker.start()
-
-                sentinal_count = len(workers)
-                retired_blocks = 0
-                while sentinal_count > 0:
-                    res = result_queue.get()
-                    if res is None:
-                        sentinal_count -= 1
-                        continue
-                    index, yoffset, step = res
-                    _, sem, arr = mem_sem_cast[index]
-                    if band:
-                        band.WriteArray(
-                            arr[0:step],
-                            destination_window.xoff,
-                            yoffset + destination_window.yoff,
-                        )
-                    if and_sum:
-                        total += np.sum(np.array(arr[0:step]).astype(np.float64))
-                    sem.release()
-                    retired_blocks += 1
-                    if callback:
-                        callback(retired_blocks / work_blocks)
-
-                processes = workers
-                while processes:
-                    candidates = [x for x in processes if not x.is_alive()]
-                    for candidate in candidates:
-                        candidate.join()
-                        if candidate.exitcode:
-                            for victim in processes:
-                                victim.kill()
-                            sys.exit(candidate.exitcode)
-                        processes.remove(candidate)
-                    time.sleep(0.01)
-
-        return total if and_sum else None
-
-    def parallel_save(
-        self,
-        destination_layer,
-        and_sum=False,
-        callback=None,
-        parallelism=None,
-        band=1
-    ) -> Optional[float]:
-        if destination_layer is None:
-            raise ValueError("Layer is required")
-        return self._parallel_save(destination_layer, and_sum, callback, parallelism, band)
-
-    def parallel_sum(self, callback=None, parallelism=None, band=1):
-        return self._parallel_save(None, True, callback, parallelism, band)
-
-    def to_geotiff(
-        self,
-        filename: Union[Path,str],
-        and_sum: bool = False,
-        parallelism:Optional[Union[int,bool]] = None
-    ) -> Optional[float]:
-        """Saves a calculation to a raster file, optionally also returning the sum of pixels.
-
-        Parameters
-        ----------
-        filename : Path
-            Path of the raster to save the result to.
-        and_sum : bool, default=False
-            If true then the function will also calculate the sum of the raster as it goes and return that value.
-        parallelism : int or bool, optional, default=None
-            If passed, attempt to use multiple CPU cores up to the number provided, or if set to True, yirgacheffe
-            will pick a sensible value.
-
-        Returns
-        -------
-        float, optional
-            Either returns None, or the sum of the pixels in the resulting raster if `and_sum` was specified.
-        """
-
-        # We want to write to a tempfile before we move the result into place, but we can't use
-        # the actual $TMPDIR as that might be on a different device, and so we use a file next to where
-        # the final file will be, so we just need to rename the file at the end, not move it.
-        if isinstance(filename, str):
-            filename = Path(filename)
-        target_dir = filename.parent
-
-        with tempfile.NamedTemporaryFile(dir=target_dir, delete=False) as tempory_file:
-            # Local import due to circular dependancy
-            from yirgacheffe.layers.rasters import RasterLayer # type: ignore # pylint: disable=C0415
-            with RasterLayer.empty_raster_layer_like(self, filename=tempory_file.name) as layer:
-                if parallelism is None:
-                    result = self.save(layer, and_sum=and_sum)
-                else:
-                    if isinstance(parallelism, bool):
-                        # Parallel save treats None as "work it out"
-                        parallelism = None
-                    result = self.parallel_save(layer, and_sum=and_sum, parallelism=parallelism)
-
-        os.makedirs(target_dir, exist_ok=True)
-        os.rename(src=tempory_file.name, dst=filename)
-
-        return result
-
-class ShaderStyleOperation(LayerOperation):
-
-    def _eval(self, area, projection, index, step, target_window=None):
-        if target_window is None:
-            target_window = self.window
-        lhs_data = self.lhs._eval(area, projection, index, step, target_window)
-        if self.rhs is not None:
-            rhs_data = self.rhs._eval(area, projection, index, step, target_window)
-        else:
-            rhs_data = None
-
-        # Constant results make this a bit messier. Might in future
-        # be nicer to promote them to arrays sooner?
-        if isinstance(lhs_data, (int, float)):
-            if rhs_data is None:
-                return self.operator(lhs_data, **self.kwargs)
-            if isinstance(rhs_data, (int, float)):
-                return self.operator(lhs_data, rhs_data, **self.kwargs)
-            else:
-                result = np.empty_like(rhs_data)
-        else:
-            result = np.empty_like(lhs_data)
-
-        window = self.window
-        for yoffset in range(step):
-            for xoffset in range(window.xsize):
-                try:
-                    lhs_val = lhs_data[yoffset][xoffset]
-                except TypeError:
-                    lhs_val = lhs_data
-                if rhs_data is not None:
-                    try:
-                        rhs_val = rhs_data[yoffset][xoffset]
-                    except TypeError:
-                        rhs_val = rhs_data
-                    result[yoffset][xoffset] = self.operator(lhs_val, rhs_val, **self.kwargs)
-                else:
-                    result[yoffset][xoffset] = self.operator(lhs_val, **self.kwargs)
-
-        return result
-
-# We provide these module level accessors as it's often nicer to write `log(x/y)` rather than `(x/y).log()`
-where = LayerOperation.where
-minumum = LayerOperation.minimum
-maximum = LayerOperation.maximum
-clip = LayerOperation.clip
-log = LayerOperation.log
-log2 = LayerOperation.log2
-log10 = LayerOperation.log10
-exp = LayerOperation.exp
-exp2 = LayerOperation.exp2
-nan_to_num = LayerOperation.nan_to_num
-isin = LayerOperation.isin
-abs = LayerOperation.abs # pylint: disable=W0622
-floor = LayerOperation.floor
-round = LayerOperation.round # pylint: disable=W0622
-ceil = LayerOperation.ceil
+from ._operators import where, minumum, maximum, clip, log, log2, log10, exp, exp2, nan_to_num, isin, \
+    floor, ceil # pylint: disable=W0611
+from ._operators import abs, round # pylint: disable=W0611,W0622
+from ._backends.enumeration import dtype as DataType # pylint: disable=W0611
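
For context, the added import lines re-export the module-level helpers (now defined in the new yirgacheffe/_operators.py) so that code written against the 1.7.6-era yirgacheffe.operators API should keep working unchanged in 1.7.7. A minimal usage sketch, assuming `population` and `mask` are already-opened layer objects that provide the LayerMathMixin interface shown in the removed code:

    from yirgacheffe.operators import where

    # Build a lazy expression tree; nothing is read or computed yet.
    masked = where(mask > 0, population, 0.0)

    # Evaluation happens chunk by chunk when a result is requested.
    total = masked.sum()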