foscat 3.7.2__py3-none-any.whl → 3.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
foscat/BkTorch.py ADDED
@@ -0,0 +1,437 @@
1
+ import sys
2
+ import foscat.BkBase as BackendBase
3
+ import numpy as np
4
+ import torch
5
+
6
+ class BkTorch(BackendBase.BackendBase):
7
+
8
+ def __init__(self, *args, **kwargs):
9
+ # Impose que use_2D=True pour la classe scat
10
+ super().__init__(name='torch', *args, **kwargs)
11
+ self.backend = torch
12
+ self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
13
+
14
+ self.float64 = self.backend.float64
15
+ self.float32 = self.backend.float32
16
+ self.int64 = self.backend.int64
17
+ self.int32 = self.backend.int32
18
+ self.complex64 = self.backend.complex128
19
+ self.complex128 = self.backend.complex64
20
+
21
+ dtype_map = {
22
+ "float32": (self.backend.float32, self.backend.complex64),
23
+ "float64": (self.backend.float64, self.backend.complex128),
24
+ }
25
+
26
+ if self.all_type in dtype_map:
27
+ self.all_bk_type, self.all_cbk_type = dtype_map[self.all_type]
28
+ else:
29
+ raise ValueError(f"ERROR INIT foscat: {all_type} should be float32 or float64")
30
+
31
+ # ===========================================================================
32
+ # INIT
33
+ if self.mpi_rank == 0:
34
+ sys.stdout.flush()
35
+
36
+ gpus = torch.cuda.is_available()
37
+
38
+ gpuname = "CPU:0"
39
+ self.gpulist = {}
40
+ self.gpulist[0] = gpuname
41
+ self.ngpu = 1
42
+
43
+ if gpus:
44
+ try:
45
+ self.ngpu = torch.cuda.device_count()
46
+ self.gpulist = {}
47
+ for k in range(self.ngpu):
48
+ self.gpulist[k] = torch.cuda.get_device_name(k)
49
+
50
+ except RuntimeError as e:
51
+ # Memory growth must be set before GPUs have been initialized
52
+ print(e)
53
+
54
+ self.torch_device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
55
+
56
+ # ---------------------------------------------−---------
57
+ # -- BACKEND DEFINITION --
58
+ # ---------------------------------------------−---------
59
+ def bk_SparseTensor(self, indice, w, dense_shape=[]):
60
+ return self.backend.sparse_coo_tensor(indice.T, w, dense_shape).to_sparse_csr().to(self.torch_device)
61
+
62
+ def bk_stack(self, list, axis=0):
63
+ return self.backend.stack(list, axis=axis).to(self.torch_device)
64
+
65
+ def bk_sparse_dense_matmul(self, smat, mat):
66
+ return smat.matmul(mat)
67
+
68
+
69
+
70
    def conv2d(self, x, w, strides=[1, 1, 1, 1], padding="SAME"):
        """2-D convolution with circular (wrap-around) padding.

        `x` is NHWC and `w` is (kh, kw, in, out) — TensorFlow layouts;
        both are permuted to torch's NCHW / (out, in, kh, kw) layouts,
        convolved, and the result permuted back to NHWC.

        NOTE(review): `strides` and `padding` are accepted for API
        compatibility but ignored — stride is fixed to 1 and padding is
        always circular "same"; confirm callers never pass other values.
        """
        import torch.nn.functional as F
        lx = x.permute(0, 3, 1, 2)
        wx = w.permute(3, 2, 0, 1)  # e.g. from (5, 5, 1, 4) to (4, 1, 5, 5)

        # Kernel height/width, used to compute the symmetric padding.
        kx, ky = w.shape[0], w.shape[1]

        # Apply circular padding so the convolution wraps around the edges.
        x_padded = F.pad(lx, (ky // 2, ky // 2, kx // 2, kx // 2), mode='circular')

        # Convolve (no further padding) and go back to NHWC.
        return F.conv2d(x_padded, wx, stride=1, padding=0).permute(0,2,3,1)
83
+
84
    def conv1d(self, x, w, strides=[1, 1, 1], padding="SAME"):
        """1-D convolution — NOT IMPLEMENTED for the torch backend.

        NOTE(review): stub that returns `x` unchanged; `w`, `strides`
        and `padding` are ignored.
        """
        # to be written!!!
        return x
87
+
88
+ def bk_threshold(self, x, threshold, greater=True):
89
+
90
+ x.to(x.dtype)
91
+ return (x > threshold) * x
92
+
93
+ def bk_maximum(self, x1, x2):
94
+ return self.backend.maximum(x1, x2)
95
+
96
+ def bk_device(self, device_name):
97
+ return self.backend.device(device_name)
98
+
99
+ def bk_ones(self, shape, dtype=None):
100
+ if dtype is None:
101
+ dtype = self.all_type
102
+ return self.bk_cast(np.ones(shape))
103
+
104
+ def bk_conv1d(self, x, w):
105
+ # Torch not yet done !!!
106
+ return self.backend.nn.conv1d(x, w, stride=1, padding="SAME")
107
+
108
+ def bk_flattenR(self, x):
109
+ if self.bk_is_complex(x):
110
+ rr = self.backend.reshape(
111
+ self.bk_real(x), [np.prod(np.array(list(x.shape)))]
112
+ )
113
+ ii = self.backend.reshape(
114
+ self.bk_imag(x), [np.prod(np.array(list(x.shape)))]
115
+ )
116
+ return self.bk_concat([rr, ii], axis=0)
117
+ else:
118
+ return self.backend.reshape(x, [np.prod(np.array(list(x.shape)))])
119
+
120
+ def bk_flatten(self, x):
121
+ return self.backend.reshape(x, [np.prod(np.array(list(x.shape)))])
122
+
123
+ def bk_resize_image(self, x, shape):
124
+ tmp = self.backend.nn.functional.interpolate(
125
+ x.permute(0,3,1,2), size=shape, mode="bilinear", align_corners=False
126
+ )
127
+ return self.bk_cast(tmp.permute(0,2,3,1))
128
+
129
    def bk_L1(self, x):
        """Signed square-root non-linearity: sign(x) * sqrt(|x|).

        NOTE(review): for complex input the imaginary component `i` is
        computed but only the real component `r` is returned (note the
        commented-out early `return r`) — confirm this is intended.
        """
        if x.dtype == self.all_cbk_type:
            xr = self.bk_real(x)
            xi = self.bk_imag(x)

            # sign(v) * sqrt(sign(v) * v) == sign(v) * sqrt(|v|)
            r = self.backend.sign(xr) * self.backend.sqrt(self.backend.sign(xr) * xr)
            # return r
            i = self.backend.sign(xi) * self.backend.sqrt(self.backend.sign(xi) * xi)

            return r
        else:
            return self.backend.sign(x) * self.backend.sqrt(self.backend.sign(x) * x)
141
+
142
+ def bk_square_comp(self, x):
143
+ if x.dtype == self.all_cbk_type:
144
+ xr = self.bk_real(x)
145
+ xi = self.bk_imag(x)
146
+
147
+ r = xr * xr
148
+ i = xi * xi
149
+ return self.bk_complex(r, i)
150
+ else:
151
+ return x * x
152
+
153
+ def bk_reduce_sum(self, data, axis=None):
154
+
155
+ if axis is None:
156
+ return self.backend.sum(data)
157
+ else:
158
+ return self.backend.sum(data, axis)
159
+
160
+ # ---------------------------------------------−---------
161
+ # return a tensor size
162
+
163
+ def bk_size(self, data):
164
+ return data.numel()
165
+
166
+
167
    def constant(self, data):
        # Identity: the torch backend keeps constants as-is.
        return data
169
+
170
+ def bk_reduce_mean(self, data, axis=None):
171
+
172
+ if axis is None:
173
+ return self.backend.mean(data)
174
+ else:
175
+ return self.backend.mean(data, axis)
176
+
177
+ def bk_reduce_min(self, data, axis=None):
178
+
179
+ if axis is None:
180
+ return self.backend.min(data)
181
+ else:
182
+ return self.backend.min(data, axis)
183
+
184
+ def bk_random_seed(self, value):
185
+
186
+ return self.backend.random.set_seed(value)
187
+
188
+ def bk_random_uniform(self, shape):
189
+
190
+ return self.backend.random.uniform(shape)
191
+
192
+ def bk_reduce_std(self, data, axis=None):
193
+ if axis is None:
194
+ r = self.backend.std(data)
195
+ else:
196
+ r = self.backend.std(data, axis)
197
+
198
+ if self.bk_is_complex(data):
199
+ return self.bk_complex(r, 0 * r)
200
+ else:
201
+ return r
202
+
203
+ def bk_sqrt(self, data):
204
+
205
+ return self.backend.sqrt(self.backend.abs(data))
206
+
207
+ def bk_abs(self, data):
208
+ return self.backend.abs(data)
209
+
210
+ def bk_is_complex(self, data):
211
+
212
+ if isinstance(data, np.ndarray):
213
+ return data.dtype == "complex64" or data.dtype == "complex128"
214
+
215
+ return data.dtype.is_complex
216
+
217
+ def bk_distcomp(self, data):
218
+ if self.bk_is_complex(data):
219
+ res = self.bk_square(self.bk_real(data)) + self.bk_square(
220
+ self.bk_imag(data)
221
+ )
222
+ return res
223
+ else:
224
+ return self.bk_square(data)
225
+
226
+ def bk_norm(self, data):
227
+ if self.bk_is_complex(data):
228
+ res = self.bk_square(self.bk_real(data)) + self.bk_square(
229
+ self.bk_imag(data)
230
+ )
231
+ return self.bk_sqrt(res)
232
+
233
+ else:
234
+ return self.bk_abs(data)
235
+
236
+ def bk_square(self, data):
237
+
238
+ return self.backend.square(data)
239
+
240
+ def bk_log(self, data):
241
+ return self.backend.log(data)
242
+
243
+ def bk_matmul(self, a, b):
244
+ return self.backend.matmul(a, b)
245
+
246
+ def bk_tensor(self, data):
247
+ return self.backend.constant(data).to(self.torch_device)
248
+
249
+ def bk_shape_tensor(self, shape):
250
+ return self.backend.tensor(shape=shape).to(self.torch_device)
251
+
252
+ def bk_complex(self, real, imag):
253
+ return self.backend.complex(real, imag).to(self.torch_device)
254
+
255
+ def bk_exp(self, data):
256
+
257
+ return self.backend.exp(data)
258
+
259
+ def bk_min(self, data):
260
+
261
+ return self.backend.reduce_min(data)
262
+
263
+ def bk_argmin(self, data):
264
+
265
+ return self.backend.argmin(data)
266
+
267
+ def bk_tanh(self, data):
268
+
269
+ return self.backend.math.tanh(data)
270
+
271
+ def bk_max(self, data):
272
+
273
+ return self.backend.reduce_max(data)
274
+
275
+ def bk_argmax(self, data):
276
+
277
+ return self.backend.argmax(data)
278
+
279
+ def bk_reshape(self, data, shape):
280
+ if isinstance(data, np.ndarray):
281
+ return data.reshape(shape)
282
+ return data.view(shape)
283
+
284
+ def bk_repeat(self, data, nn, axis=0):
285
+ return self.backend.repeat(data, nn, axis=axis)
286
+
287
    def bk_tile(self, data, nn, axis=0):
        """Tile `data` `nn` times.

        NOTE(review): `axis` is ignored — `dims=[nn]` always tiles the
        trailing dimension; confirm callers only rely on 1-D inputs.
        """
        return self.backend.tile(data, dims=[nn])
290
+
291
+ def bk_roll(self, data, nn, axis=0):
292
+ return self.backend.roll(data, nn, axis=axis)
293
+
294
+ def bk_expand_dims(self, data, axis=0):
295
+ if isinstance(data, np.ndarray):
296
+ data = self.backend.from_numpy(data)
297
+ return self.backend.unsqueeze(data, axis)
298
+
299
+ def bk_transpose(self, data, thelist):
300
+ return self.backend.transpose(data, thelist)
301
+
302
+ def bk_concat(self, data, axis=None):
303
+
304
+ if axis is None:
305
+ if data[0].dtype == self.all_cbk_type:
306
+ ndata = len(data)
307
+ xr = self.backend.concat(
308
+ [self.bk_real(data[k]) for k in range(ndata)]
309
+ )
310
+ xi = self.backend.concat(
311
+ [self.bk_imag(data[k]) for k in range(ndata)]
312
+ )
313
+ return self.bk_complex(xr, xi)
314
+ else:
315
+ return self.backend.concat(data)
316
+ else:
317
+ if data[0].dtype == self.all_cbk_type:
318
+ ndata = len(data)
319
+ xr = self.backend.concat(
320
+ [self.bk_real(data[k]) for k in range(ndata)], axis=axis
321
+ )
322
+ xi = self.backend.concat(
323
+ [self.bk_imag(data[k]) for k in range(ndata)], axis=axis
324
+ )
325
+ return self.bk_complex(xr, xi)
326
+ else:
327
+ return self.backend.concat(data, axis=axis)
328
+
329
+ def bk_zeros(self, shape, dtype=None):
330
+ return self.backend.zeros(shape, dtype=dtype).to(self.torch_device)
331
+
332
+ def bk_gather(self, data, idx):
333
+ return data[idx]
334
+
335
+ def bk_reverse(self, data, axis=0):
336
+ return self.backend.flip(data, dims=[axis])
337
+
338
+ def bk_fft(self, data):
339
+ return self.backend.fft.fft(data)
340
+
341
+ def bk_fftn(self, data,dim=None):
342
+ return self.backend.fft.fftn(data,dim=dim)
343
+
344
+ def bk_ifftn(self, data,dim=None,norm=None):
345
+ return self.backend.fft.ifftn(data,dim=dim,norm=norm)
346
+
347
+ def bk_rfft(self, data):
348
+ return self.backend.fft.rfft(data)
349
+
350
+ def bk_irfft(self, data):
351
+ return self.backend.fft.irfft(data)
352
+
353
+ def bk_conjugate(self, data):
354
+
355
+ return self.backend.conj(data)
356
+
357
+ def bk_real(self, data):
358
+ return data.real
359
+
360
+ def bk_imag(self, data):
361
+ if data.dtype == self.all_cbk_type:
362
+ return data.imag
363
+ else:
364
+ return 0
365
+
366
+ def bk_relu(self, x):
367
+ return self.backend.relu(x)
368
+
369
+ def bk_clip_by_value(self, x,xmin,xmax):
370
+ if isinstance(x, np.ndarray):
371
+ x = np.clip(x,xmin,xmax)
372
+ x = self.backend.tensor(x, dtype=self.backend.float32) if not isinstance(x, self.backend.Tensor) else x
373
+ xmin = self.backend.tensor(xmin, dtype=self.backend.float32) if not isinstance(xmin, self.backend.Tensor) else xmin
374
+ xmax = self.backend.tensor(xmax, dtype=self.backend.float32) if not isinstance(xmax, self.backend.Tensor) else xmax
375
+ return self.backend.clamp(x, min=xmin, max=xmax)
376
+
377
+ def bk_cast(self, x):
378
+ if isinstance(x, np.float64):
379
+ if self.all_bk_type == "float32":
380
+ return self.backend.tensor(np.float32(x)).to(self.torch_device)
381
+ else:
382
+ return self.backend.tensor(x).to(self.torch_device)
383
+ if isinstance(x, np.float32):
384
+ if self.all_bk_type == "float64":
385
+ return self.backend.tensor(np.float64(x)).to(self.torch_device)
386
+ else:
387
+ return self.backend.tensor(x).to(self.torch_device)
388
+ if isinstance(x, np.complex128):
389
+ if self.all_bk_type == "float32":
390
+ return self.backend.tensor(np.complex64(x)).to(self.torch_device)
391
+ else:
392
+ return self.backend.tensor(x).to(self.torch_device)
393
+ if isinstance(x, np.complex64):
394
+ if self.all_bk_type == "float64":
395
+ return self.backend.tensor(np.complex128(x)).to(self.torch_device)
396
+ else:
397
+ return self.backend.tensor(x).to(self.torch_device)
398
+
399
+ if isinstance(x, np.int32) or isinstance(x, np.int64) or isinstance(x, int):
400
+ if self.all_bk_type == "float64":
401
+ return self.backend.tensor(np.float64(x)).to(self.torch_device)
402
+ else:
403
+ return self.backend.tensor(np.float32(x)).to(self.torch_device)
404
+
405
+ if self.bk_is_complex(x):
406
+ out_type = self.all_cbk_type
407
+ else:
408
+ out_type = self.all_bk_type
409
+
410
+ if isinstance(x, np.ndarray):
411
+ x = self.backend.from_numpy(x).to(self.torch_device)
412
+
413
+ if x.dtype.is_complex:
414
+ out_type = self.all_cbk_type
415
+ else:
416
+ out_type = self.all_bk_type
417
+
418
+ return x.type(out_type).to(self.torch_device)
419
+
420
+ def bk_variable(self,x):
421
+ return self.bk_cast(x)
422
+
423
    def bk_assign(self, x, y):
        # NOTE(review): this rebinds the local name only and has no
        # effect on the caller's tensor — an in-place `x.copy_(y)` may
        # have been intended; confirm against the TF backend's assign.
        x=y
425
+
426
+ def bk_constant(self,x):
427
+
428
+ return self.bk_cast(x)
429
+
430
+ def bk_empty(self,list):
431
+ return self.backend.empty(list)
432
+
433
+ def to_numpy(self,x):
434
+ if isinstance(x, np.ndarray):
435
+ return x
436
+
437
+ return x.cpu().numpy()
foscat/FoCUS.py CHANGED
@@ -1,12 +1,11 @@
1
1
  import os
2
+ import os
2
3
  import sys
3
4
 
4
5
  import healpy as hp
5
6
  import numpy as np
6
7
  from scipy.interpolate import griddata
7
8
 
8
- import foscat.backend as bk
9
-
10
9
  TMPFILE_VERSION = "V4_0"
11
10
 
12
11
 
@@ -37,7 +36,7 @@ class FoCUS:
37
36
  mpi_rank=0,
38
37
  ):
39
38
 
40
- self.__version__ = "3.7.2"
39
+ self.__version__ = "3.8.0"
41
40
  # P00 coeff for normalization for scat_cov
42
41
  self.TMPFILE_VERSION = TMPFILE_VERSION
43
42
  self.P1_dic = None
@@ -106,13 +105,31 @@ class FoCUS:
106
105
 
107
106
  self.all_type = all_type
108
107
  self.BACKEND = BACKEND
109
- self.backend = bk.foscat_backend(
110
- BACKEND,
111
- all_type=all_type,
112
- mpi_rank=mpi_rank,
113
- gpupos=gpupos,
114
- silent=self.silent,
115
- )
108
+
109
+ if BACKEND=='torch':
110
+ from foscat.BkTorch import BkTorch
111
+ self.backend = BkTorch(
112
+ all_type=all_type,
113
+ mpi_rank=mpi_rank,
114
+ gpupos=gpupos,
115
+ silent=self.silent,
116
+ )
117
+ elif BACKEND=='tensorflow':
118
+ from foscat.BkTensorflow import BkTensorflow
119
+ self.backend = BkTensorflow(
120
+ all_type=all_type,
121
+ mpi_rank=mpi_rank,
122
+ gpupos=gpupos,
123
+ silent=self.silent,
124
+ )
125
+ else:
126
+ from foscat.BkNumpy import BkNumpy
127
+ self.backend = BkNumpy(
128
+ all_type=all_type,
129
+ mpi_rank=mpi_rank,
130
+ gpupos=gpupos,
131
+ silent=self.silent,
132
+ )
116
133
 
117
134
  self.all_bk_type = self.backend.all_bk_type
118
135
  self.all_cbk_type = self.backend.all_cbk_type
@@ -262,7 +279,7 @@ class FoCUS:
262
279
  else:
263
280
  wr, wi, ws, widx = self.InitWave(self, lout)
264
281
 
265
- self.Idx_Neighbours[lout] = 1 # self.backend.constant(widx)
282
+ self.Idx_Neighbours[lout] = 1 # self.backend.bk_constant(widx)
266
283
  self.ww_Real[lout] = wr
267
284
  self.ww_Imag[lout] = wi
268
285
  self.w_smooth[lout] = ws
@@ -283,13 +300,13 @@ class FoCUS:
283
300
  r = np.sum(np.sqrt(c * c + s * s))
284
301
  c = c / r
285
302
  s = s / r
286
- self.ww_RealT[1] = self.backend.constant(
303
+ self.ww_RealT[1] = self.backend.bk_constant(
287
304
  np.array(c).reshape(xx.shape[0], 1, 1)
288
305
  )
289
- self.ww_ImagT[1] = self.backend.constant(
306
+ self.ww_ImagT[1] = self.backend.bk_constant(
290
307
  np.array(s).reshape(xx.shape[0], 1, 1)
291
308
  )
292
- self.ww_SmoothT[1] = self.backend.constant(
309
+ self.ww_SmoothT[1] = self.backend.bk_constant(
293
310
  np.array(w).reshape(xx.shape[0], 1, 1)
294
311
  )
295
312
 
@@ -299,21 +316,21 @@ class FoCUS:
299
316
  self.ww_ImagT = {}
300
317
  self.ww_SmoothT = {}
301
318
 
302
- self.ww_SmoothT[1] = self.backend.constant(
319
+ self.ww_SmoothT[1] = self.backend.bk_constant(
303
320
  self.w_smooth.reshape(KERNELSZ, KERNELSZ, 1, 1)
304
321
  )
305
322
  www = np.zeros([KERNELSZ, KERNELSZ, NORIENT, NORIENT], dtype=self.all_type)
306
323
  for k in range(NORIENT):
307
324
  www[:, :, k, k] = self.w_smooth.reshape(KERNELSZ, KERNELSZ)
308
- self.ww_SmoothT[NORIENT] = self.backend.constant(
325
+ self.ww_SmoothT[NORIENT] = self.backend.bk_constant(
309
326
  www.reshape(KERNELSZ, KERNELSZ, NORIENT, NORIENT)
310
327
  )
311
- self.ww_RealT[1] = self.backend.constant(
328
+ self.ww_RealT[1] = self.backend.bk_constant(
312
329
  self.backend.bk_reshape(
313
330
  wwc.astype(self.all_type), [KERNELSZ, KERNELSZ, 1, NORIENT]
314
331
  )
315
332
  )
316
- self.ww_ImagT[1] = self.backend.constant(
333
+ self.ww_ImagT[1] = self.backend.bk_constant(
317
334
  self.backend.bk_reshape(
318
335
  wws.astype(self.all_type), [KERNELSZ, KERNELSZ, 1, NORIENT]
319
336
  )
@@ -330,10 +347,10 @@ class FoCUS:
330
347
  )
331
348
  return y
332
349
 
333
- self.ww_RealT[NORIENT] = self.backend.constant(
350
+ self.ww_RealT[NORIENT] = self.backend.bk_constant(
334
351
  doorientw(wwc.astype(self.all_type))
335
352
  )
336
- self.ww_ImagT[NORIENT] = self.backend.constant(
353
+ self.ww_ImagT[NORIENT] = self.backend.bk_constant(
337
354
  doorientw(wws.astype(self.all_type))
338
355
  )
339
356
  self.pix_interp_val = {}
@@ -706,7 +723,7 @@ class FoCUS:
706
723
  def ud_grade(self, im, j, axis=0):
707
724
  rim = im
708
725
  for k in range(j):
709
- rim = self.smooth(rim, axis=axis)
726
+ #rim = self.smooth(rim, axis=axis)
710
727
  rim = self.ud_grade_2(rim, axis=axis)
711
728
  return rim
712
729
 
@@ -962,8 +979,8 @@ class FoCUS:
962
979
 
963
980
  self.pix_interp_val[lout][nout] = 1
964
981
  self.weight_interp_val[lout][nout] = self.backend.bk_SparseTensor(
965
- self.backend.constant(indice),
966
- self.backend.constant(self.backend.bk_cast(w.flatten())),
982
+ self.backend.bk_constant(indice),
983
+ self.backend.bk_constant(self.backend.bk_cast(w.flatten())),
967
984
  dense_shape=[12 * nout**2, 12 * lout**2],
968
985
  )
969
986
 
@@ -1238,8 +1255,8 @@ class FoCUS:
1238
1255
  wr = np.repeat(wr, odata, 2)
1239
1256
  wi = np.repeat(wi, odata, 2)
1240
1257
 
1241
- wr = self.backend.bk_cast(self.backend.constant(wr))
1242
- wi = self.backend.bk_cast(self.backend.constant(wi))
1258
+ wr = self.backend.bk_cast(self.backend.bk_constant(wr))
1259
+ wi = self.backend.bk_cast(self.backend.bk_constant(wi))
1243
1260
 
1244
1261
  tim = self.backend.bk_reshape(self.backend.bk_cast(im), [ndata, npix, odata])
1245
1262
 
@@ -1291,7 +1308,7 @@ class FoCUS:
1291
1308
  if odata > 1:
1292
1309
  w = np.repeat(w, odata, 2)
1293
1310
 
1294
- w = self.backend.bk_cast(self.backend.constant(w))
1311
+ w = self.backend.bk_cast(self.backend.bk_constant(w))
1295
1312
 
1296
1313
  tim = self.backend.bk_reshape(self.backend.bk_cast(im), [ndata, npix, odata])
1297
1314
 
@@ -1676,18 +1693,18 @@ class FoCUS:
1676
1693
  )
1677
1694
 
1678
1695
  wr = self.backend.bk_SparseTensor(
1679
- self.backend.constant(tmp),
1680
- self.backend.constant(self.backend.bk_cast(wr)),
1696
+ self.backend.bk_constant(tmp),
1697
+ self.backend.bk_constant(self.backend.bk_cast(wr)),
1681
1698
  dense_shape=[12 * nside**2 * self.NORIENT, 12 * nside**2],
1682
1699
  )
1683
1700
  wi = self.backend.bk_SparseTensor(
1684
- self.backend.constant(tmp),
1685
- self.backend.constant(self.backend.bk_cast(wi)),
1701
+ self.backend.bk_constant(tmp),
1702
+ self.backend.bk_constant(self.backend.bk_cast(wi)),
1686
1703
  dense_shape=[12 * nside**2 * self.NORIENT, 12 * nside**2],
1687
1704
  )
1688
1705
  ws = self.backend.bk_SparseTensor(
1689
- self.backend.constant(tmp2),
1690
- self.backend.constant(self.backend.bk_cast(ws)),
1706
+ self.backend.bk_constant(tmp2),
1707
+ self.backend.bk_constant(self.backend.bk_cast(ws)),
1691
1708
  dense_shape=[12 * nside**2, 12 * nside**2],
1692
1709
  )
1693
1710
 
@@ -2281,7 +2298,7 @@ class FoCUS:
2281
2298
  else:
2282
2299
  wr, wi, ws, widx = self.InitWave(self, nside)
2283
2300
 
2284
- self.Idx_Neighbours[nside] = 1 # self.backend.constant(tmp)
2301
+ self.Idx_Neighbours[nside] = 1 # self.backend.bk_constant(tmp)
2285
2302
  self.ww_Real[nside] = wr
2286
2303
  self.ww_Imag[nside] = wi
2287
2304
  self.w_smooth[nside] = ws