foscat 3.7.3__py3-none-any.whl → 3.8.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foscat/BkBase.py +555 -0
- foscat/BkNumpy.py +392 -0
- foscat/BkTensorflow.py +488 -0
- foscat/BkTorch.py +454 -0
- foscat/FoCUS.py +50 -33
- foscat/alm.py +170 -144
- foscat/backend.py +5 -2
- foscat/scat_cov.py +772 -274
- foscat/scat_cov2D.py +1 -1
- {foscat-3.7.3.dist-info → foscat-3.8.2.dist-info}/METADATA +1 -1
- {foscat-3.7.3.dist-info → foscat-3.8.2.dist-info}/RECORD +14 -10
- {foscat-3.7.3.dist-info → foscat-3.8.2.dist-info}/LICENSE +0 -0
- {foscat-3.7.3.dist-info → foscat-3.8.2.dist-info}/WHEEL +0 -0
- {foscat-3.7.3.dist-info → foscat-3.8.2.dist-info}/top_level.txt +0 -0
foscat/BkTensorflow.py
ADDED
@@ -0,0 +1,488 @@
import sys

import foscat.BkBase as BackendBase
import numpy as np
import tensorflow as tf


class BkTensorflow(BackendBase.BackendBase):

    def __init__(self, *args, **kwargs):
        # Force use_2D=True for the scat class
        super().__init__(name='tensorflow', *args, **kwargs)

        # ===========================================================================
        # INIT

        self.backend = tf
        # tf.config.threading.set_inter_op_parallelism_threads(1)
        # tf.config.threading.set_intra_op_parallelism_threads(1)
        self.tf_function = tf.function

        self.float64 = self.backend.float64
        self.float32 = self.backend.float32
        self.int64 = self.backend.int64
        self.int32 = self.backend.int32
        self.complex64 = self.backend.complex64
        self.complex128 = self.backend.complex128

        dtype_map = {
            "float32": (self.backend.float32, self.backend.complex64),
            "float64": (self.backend.float64, self.backend.complex128),
        }

        if self.all_type in dtype_map:
            self.all_bk_type, self.all_cbk_type = dtype_map[self.all_type]
        else:
            raise ValueError(f"ERROR INIT foscat: {self.all_type} should be float32 or float64")

        if self.mpi_rank == 0:
            if not self.silent:
                print(
                    "Num GPUs Available: ",
                    len(self.backend.config.experimental.list_physical_devices("GPU")),
                )
                sys.stdout.flush()

        self.backend.debugging.set_log_device_placement(False)
        self.backend.config.set_soft_device_placement(True)

        gpus = self.backend.config.experimental.list_physical_devices("GPU")

        gpuname = "CPU:0"
        self.gpulist = {}
        self.gpulist[0] = gpuname
        self.ngpu = 1

        if gpus:
            try:
                # Currently, memory growth needs to be the same across GPUs
                for gpu in gpus:
                    self.backend.config.experimental.set_memory_growth(gpu, True)
                logical_gpus = (
                    self.backend.config.experimental.list_logical_devices("GPU")
                )
                print(
                    len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs"
                )
                sys.stdout.flush()
                self.ngpu = len(logical_gpus)
                gpuname = logical_gpus[self.gpupos % self.ngpu].name
                self.gpulist = {}
                for i in range(self.ngpu):
                    self.gpulist[i] = logical_gpus[i].name

            except RuntimeError as e:
                # Memory growth must be set before GPUs have been initialized
                print(e)

    def tf_loc_function(self, func):
        return func
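The constructor ties the requested working precision (`all_type`, inherited from `BkBase`) to a matching real/complex TensorFlow dtype pair through `dtype_map`. A minimal standalone sketch of that lookup, not part of the packaged file (the `all_type` value below is illustrative; foscat receives it through `BackendBase`):

    import tensorflow as tf

    # Same lookup as in __init__: one real dtype and its matching complex dtype.
    dtype_map = {
        "float32": (tf.float32, tf.complex64),
        "float64": (tf.float64, tf.complex128),
    }

    all_type = "float32"  # illustrative value
    if all_type not in dtype_map:
        raise ValueError(f"ERROR INIT foscat: {all_type} should be float32 or float64")
    real_dtype, complex_dtype = dtype_map[all_type]
    print(real_dtype, complex_dtype)  # <dtype: 'float32'> <dtype: 'complex64'>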
# ---------------------------------------------−---------
|
|
83
|
+
# -- BACKEND DEFINITION --
|
|
84
|
+
# ---------------------------------------------−---------
|
|
85
|
+
def bk_SparseTensor(self, indice, w, dense_shape=[]):
|
|
86
|
+
return self.backend.SparseTensor(indice, w, dense_shape=dense_shape)
|
|
87
|
+
|
|
88
|
+
def bk_stack(self, list, axis=0):
|
|
89
|
+
return self.backend.stack(list, axis=axis)
|
|
90
|
+
|
|
91
|
+
def bk_sparse_dense_matmul(self, smat, mat):
|
|
92
|
+
return self.backend.sparse.sparse_dense_matmul(smat, mat)
|
|
93
|
+
|
|
94
|
+
# for tensorflow wrapping only
|
|
95
|
+
def periodic_pad(self,x, pad_height, pad_width):
|
|
96
|
+
"""
|
|
97
|
+
Applies periodic ('wrap') padding to a 4D TensorFlow tensor (N, H, W, C).
|
|
98
|
+
|
|
99
|
+
Args:
|
|
100
|
+
x (tf.Tensor): Input tensor with shape (batch_size, height, width, channels).
|
|
101
|
+
pad_height (tuple): Tuple (top, bottom) defining the vertical padding size.
|
|
102
|
+
pad_width (tuple): Tuple (left, right) defining the horizontal padding size.
|
|
103
|
+
|
|
104
|
+
Returns:
|
|
105
|
+
tf.Tensor: Tensor with periodic padding applied.
|
|
106
|
+
"""
|
|
107
|
+
#Vertical padding: take slices from bottom and top to wrap around
|
|
108
|
+
top_pad = x[:, -pad_height:, :, :] # Top padding from the bottom rows
|
|
109
|
+
bottom_pad = x[:, :pad_height, :, :] # Bottom padding from the top rows
|
|
110
|
+
x_padded = self.backend.concat([top_pad, x, bottom_pad], axis=1) # Concatenate vertically
|
|
111
|
+
|
|
112
|
+
#Horizontal padding: take slices from right and left to wrap around
|
|
113
|
+
left_pad = x_padded[:, :, -pad_width:, :] # Left padding from right columns
|
|
114
|
+
right_pad = x_padded[:, :, :pad_width, :] # Right padding from left columns
|
|
115
|
+
|
|
116
|
+
x_padded = self.backend.concat([left_pad, x_padded, right_pad], axis=2) # Concatenate horizontally
|
|
117
|
+
|
|
118
|
+
return x_padded
|
|
119
|
+
|
|
120
|
+
def conv2d(self, x, w, strides=[1, 1, 1, 1], padding="SAME"):
|
|
121
|
+
kx = w.shape[0]
|
|
122
|
+
ky = w.shape[1]
|
|
123
|
+
x_padded = self.periodic_pad(x, kx // 2, ky // 2)
|
|
124
|
+
return self.backend.nn.conv2d(x_padded, w, strides=strides, padding="VALID")
|
|
125
|
+
|
|
126
|
+
def conv1d(self, x, w, strides=[1, 1, 1], padding="SAME"):
|
|
127
|
+
kx = w.shape[0]
|
|
128
|
+
paddings = self.backend.constant([[0, 0], [kx // 2, kx // 2], [0, 0]])
|
|
129
|
+
tmp = self.backend.pad(x, paddings, "SYMMETRIC")
|
|
130
|
+
|
|
131
|
+
return self.backend.nn.conv1d(tmp, w, stride=strides, padding="VALID")
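`conv2d` relies on `periodic_pad` to emulate circular boundary conditions: the input is wrap-padded by half the kernel size in each direction and then convolved with `padding="VALID"`, so the output keeps the input's spatial size. A self-contained check of that behaviour, not part of the packaged file (array values are arbitrary):

    import tensorflow as tf

    x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))  # (N, H, W, C)
    w = tf.ones((3, 3, 1, 1), dtype=tf.float32)                   # (kh, kw, Cin, Cout)

    # Wrap-pad by kh // 2 rows and kw // 2 columns, as periodic_pad does
    xp = tf.concat([x[:, -1:, :, :], x, x[:, :1, :, :]], axis=1)
    xp = tf.concat([xp[:, :, -1:, :], xp, xp[:, :, :1, :]], axis=2)

    # A VALID convolution on the padded tensor acts as a circular convolution
    # and preserves the original spatial size.
    y = tf.nn.conv2d(xp, w, strides=[1, 1, 1, 1], padding="VALID")
    print(y.shape)  # (1, 4, 4, 1)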

    def bk_threshold(self, x, threshold, greater=True):
        return self.backend.cast(x > threshold, x.dtype) * x

    def bk_maximum(self, x1, x2):
        return self.backend.maximum(x1, x2)

    def bk_device(self, device_name):
        return self.backend.device(device_name)

    def bk_ones(self, shape, dtype=None):
        if dtype is None:
            dtype = self.all_type
        return self.backend.ones(shape, dtype=dtype)

    def bk_conv1d(self, x, w):
        return self.backend.nn.conv1d(x, w, stride=[1, 1, 1], padding="SAME")

    def bk_flattenR(self, x):
        if self.bk_is_complex(x):
            rr = self.backend.reshape(
                self.bk_real(x), [np.prod(np.array(list(x.shape)))]
            )
            ii = self.backend.reshape(
                self.bk_imag(x), [np.prod(np.array(list(x.shape)))]
            )
            return self.bk_concat([rr, ii], axis=0)
        else:
            return self.backend.reshape(x, [np.prod(np.array(list(x.shape)))])

    def bk_flatten(self, x):
        return self.backend.reshape(x, [-1])

    def bk_size(self, x):
        return self.backend.size(x)

    def bk_resize_image(self, x, shape):
        return self.bk_cast(self.backend.image.resize(x, shape, method="bilinear"))

    def bk_L1(self, x):
        if x.dtype == self.all_cbk_type:
            xr = self.bk_real(x)
            xi = self.bk_imag(x)

            r = self.backend.sign(xr) * self.backend.sqrt(self.backend.sign(xr) * xr)
            # return r
            i = self.backend.sign(xi) * self.backend.sqrt(self.backend.sign(xi) * xi)

            return self.bk_complex(r, i)
        else:
            return self.backend.sign(x) * self.backend.sqrt(self.backend.sign(x) * x)

    def bk_square_comp(self, x):
        xr = self.bk_real(x)
        xi = self.bk_imag(x)

        r = xr * xr
        i = xi * xi
        return self.bk_complex(r, i)
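`bk_L1` implements a signed square root, `sign(x) * sqrt(|x|)`, applied separately to the real and imaginary parts of complex inputs, and `bk_square_comp` is its component-wise counterpart for squaring. A small illustration in plain TensorFlow, not part of the packaged file:

    import tensorflow as tf

    x = tf.constant([4.0, -9.0, 0.25])
    # sign(x) * sqrt(sign(x) * x) equals sign(x) * sqrt(|x|)
    l1 = tf.sign(x) * tf.sqrt(tf.sign(x) * x)
    print(l1.numpy())  # 2.0, -3.0, 0.5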

    def bk_reduce_sum(self, data, axis=None):
        if axis is None:
            return self.backend.reduce_sum(data)
        else:
            return self.backend.reduce_sum(data, axis=axis)

    # --------------------------------------------------------
    # return a tensor size

    def bk_size(self, data):
        return self.backend.size(data)

    def bk_reduce_mean(self, data, axis=None):
        if axis is None:
            return self.backend.reduce_mean(data)
        else:
            return self.backend.reduce_mean(data, axis=axis)

    def bk_reduce_min(self, data, axis=None):
        if axis is None:
            return self.backend.reduce_min(data)
        else:
            return self.backend.reduce_min(data, axis=axis)

    def bk_random_seed(self, value):
        return self.backend.random.set_seed(value)

    def bk_random_uniform(self, shape):
        return self.backend.random.uniform(shape)

    def bk_reduce_std(self, data, axis=None):
        if axis is None:
            r = self.backend.math.reduce_std(data)
        else:
            r = self.backend.math.reduce_std(data, axis=axis)
        if self.bk_is_complex(data):
            return self.bk_complex(r, 0 * r)
        else:
            return r

    def bk_sqrt(self, data):
        return self.backend.sqrt(self.backend.abs(data))

    def bk_abs(self, data):
        return self.backend.abs(data)

    def bk_is_complex(self, data):
        if isinstance(data, np.ndarray):
            return data.dtype == "complex64" or data.dtype == "complex128"
        return data.dtype.is_complex

    def bk_distcomp(self, data):
        if self.bk_is_complex(data):
            res = self.bk_square(self.bk_real(data)) + self.bk_square(
                self.bk_imag(data)
            )
            return res
        else:
            return self.bk_square(data)

    def bk_norm(self, data):
        if self.bk_is_complex(data):
            res = self.bk_square(self.bk_real(data)) + self.bk_square(
                self.bk_imag(data)
            )
            return self.bk_sqrt(res)
        else:
            return self.bk_abs(data)

    def bk_square(self, data):
        return self.backend.square(data)

    def bk_log(self, data):
        return self.backend.math.log(data)

    def bk_matmul(self, a, b):
        return self.backend.matmul(a, b)

    def bk_tensor(self, data):
        return self.backend.constant(data)

    def bk_shape_tensor(self, shape):
        return self.backend.tensor(shape=shape)

    def bk_complex(self, real, imag):
        return self.backend.dtypes.complex(real, imag)

    def bk_exp(self, data):
        return self.backend.exp(data)

    def bk_min(self, data):
        return self.backend.reduce_min(data)

    def bk_argmin(self, data):
        return self.backend.argmin(data)

    def bk_tanh(self, data):
        return self.backend.math.tanh(data)

    def bk_max(self, data):
        return self.backend.reduce_max(data)

    def bk_argmax(self, data):
        return self.backend.argmax(data)

    def bk_reshape(self, data, shape):
        return self.backend.reshape(data, shape)

    def bk_repeat(self, data, nn, axis=0):
        return self.backend.repeat(data, nn, axis=axis)

    def bk_tile(self, data, nn, axis=0):
        order = [1 for k in data.shape]
        order[axis] = nn
        return self.backend.tile(data, self.backend.constant(order, tf.int32))

    def bk_roll(self, data, nn, axis=0):
        return self.backend.roll(data, nn, axis=axis)

    def bk_expand_dims(self, data, axis=0):
        return self.backend.expand_dims(data, axis=axis)

    def bk_transpose(self, data, thelist):
        return self.backend.transpose(data, thelist)

    def bk_concat(self, data, axis=None):
        if axis is None:
            if data[0].dtype == self.all_cbk_type:
                ndata = len(data)
                xr = self.backend.concat(
                    [self.bk_real(data[k]) for k in range(ndata)], axis=0
                )
                xi = self.backend.concat(
                    [self.bk_imag(data[k]) for k in range(ndata)], axis=0
                )
                return self.bk_complex(xr, xi)
            else:
                return self.backend.concat(data, axis=0)
        else:
            if data[0].dtype == self.all_cbk_type:
                ndata = len(data)
                xr = self.backend.concat(
                    [self.bk_real(data[k]) for k in range(ndata)], axis=axis
                )
                xi = self.backend.concat(
                    [self.bk_imag(data[k]) for k in range(ndata)], axis=axis
                )
                return self.bk_complex(xr, xi)
            else:
                return self.backend.concat(data, axis=axis)
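For inputs in the complex working dtype, `bk_concat` concatenates the real and imaginary parts separately and reassembles a complex tensor. The same pattern in plain TensorFlow, not part of the packaged file:

    import tensorflow as tf

    a = tf.complex(tf.constant([1.0, 2.0]), tf.constant([3.0, 4.0]))
    b = tf.complex(tf.constant([5.0]), tf.constant([6.0]))

    # Concatenate real and imaginary parts, then rebuild the complex tensor.
    xr = tf.concat([tf.math.real(a), tf.math.real(b)], axis=0)
    xi = tf.concat([tf.math.imag(a), tf.math.imag(b)], axis=0)
    out = tf.complex(xr, xi)
    print(out.numpy())  # [1.+3.j 2.+4.j 5.+6.j]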

    def bk_zeros(self, shape, dtype=None):
        return self.backend.zeros(shape, dtype=dtype)

    def bk_gather(self, data, idx, axis=0):
        return self.backend.gather(data, idx, axis=axis)

    def bk_reverse(self, data, axis=0):
        return self.backend.reverse(data, axis=[axis])

    def bk_fft(self, data):
        return self.backend.signal.fft(data)

    def bk_fftn(self, data, dim=None):
        # Equivalent of torch.fft.fftn(x, dim=dims) in TensorFlow
        if len(dim) == 2:
            return self.backend.signal.fft2d(self.bk_complex(data, 0 * data))
        else:
            return self.backend.signal.fft(self.bk_complex(data, 0 * data))

    def bk_ifftn(self, data, dim=None, norm=None):
        if norm is not None:
            if len(dim) == 2:
                normalization = self.backend.sqrt(
                    self.backend.cast(
                        data.shape[dim[0]] * data.shape[dim[1]], self.all_cbk_type
                    )
                )
                return self.backend.signal.ifft2d(data) * normalization
            else:
                normalization = self.backend.sqrt(
                    self.backend.cast(data.shape[dim[0]], self.all_cbk_type)
                )
                return self.backend.signal.ifft(data) * normalization
        else:
            if len(dim) == 2:
                return self.backend.signal.ifft2d(data)
            else:
                return self.backend.signal.ifft(data)
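When `norm` is set, `bk_ifftn` rescales TensorFlow's inverse FFT (which divides by the number of samples) by `sqrt(N)`, leaving an overall `1/sqrt(N)` factor. This appears to mirror the `norm="ortho"` convention of `torch.fft.ifftn`. A standalone sketch for the 2D case, not part of the packaged file:

    import tensorflow as tf

    h = w = 8
    x = tf.complex(tf.random.normal((h, w)), tf.random.normal((h, w)))

    # tf.signal.ifft2d divides by h * w; multiplying by sqrt(h * w)
    # gives the 1 / sqrt(h * w) ("ortho") scaling.
    scale = tf.cast(tf.sqrt(float(h * w)), tf.complex64)
    y_ortho = tf.signal.ifft2d(x) * scale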

    def bk_rfft(self, data):
        return self.backend.signal.rfft(data)

    def bk_irfft(self, data):
        return self.backend.signal.irfft(data)

    def bk_conjugate(self, data):
        return self.backend.math.conj(data)

    def bk_real(self, data):
        return self.backend.math.real(data)

    def bk_imag(self, data):
        return self.backend.math.imag(data)

    def bk_relu(self, x):
        if x.dtype == self.all_cbk_type:
            xr = self.backend.nn.relu(self.bk_real(x))
            xi = self.backend.nn.relu(self.bk_imag(x))
            return self.bk_complex(xr, xi)
        else:
            return self.backend.nn.relu(x)

    def bk_clip_by_value(self, x, xmin, xmax):
        if isinstance(x, np.ndarray):
            x = np.clip(x, xmin, xmax)
        return self.backend.clip_by_value(x, xmin, xmax)

    def bk_cast(self, x):
        if isinstance(x, np.float64):
            if self.all_bk_type == "float32":
                return np.float32(x)
            else:
                return x
        if isinstance(x, np.float32):
            if self.all_bk_type == "float64":
                return np.float64(x)
            else:
                return x
        if isinstance(x, np.complex128):
            if self.all_bk_type == "float32":
                return np.complex64(x)
            else:
                return x
        if isinstance(x, np.complex64):
            if self.all_bk_type == "float64":
                return np.complex128(x)
            else:
                return x

        if isinstance(x, np.int32) or isinstance(x, np.int64) or isinstance(x, int):
            if self.all_bk_type == "float64":
                return np.float64(x)
            else:
                return np.float32(x)

        if self.bk_is_complex(x):
            out_type = self.all_cbk_type
        else:
            out_type = self.all_bk_type

        return self.backend.cast(x, out_type)

    def bk_variable(self, x):
        return self.backend.Variable(x)

    def bk_assign(self, x, y):
        x.assign(y)

    def bk_constant(self, x):
        return self.backend.constant(x)

    def bk_cos(self, x):
        return self.backend.cos(x)

    def bk_sin(self, x):
        return self.backend.sin(x)

    def bk_arctan2(self, c, s):
        return self.backend.math.atan2(c, s)

    def bk_empty(self, list):
        return self.backend.constant(list)

    def to_numpy(self, x):
        if isinstance(x, np.ndarray):
            return x

        return x.numpy()
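Taken together, the `bk_*` methods give foscat a uniform, backend-agnostic API over TensorFlow. A hedged usage sketch, assuming the `BackendBase` constructor (defined in `foscat/BkBase.py`, whose body is not reproduced here) provides workable defaults when called without arguments:

    import numpy as np
    import foscat.BkTensorflow as BkTF

    bk = BkTF.BkTensorflow()  # hypothetical call; arguments are handled by BackendBase

    x = bk.bk_cast(np.arange(16.0).reshape(4, 4))  # cast to the working dtype
    z = bk.bk_complex(x, 0 * x)                    # build a complex tensor
    m = bk.bk_reduce_mean(bk.bk_abs(z))            # backend-agnostic reduction
    print(bk.to_numpy(m))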