foscat-3.3.4-py3-none-any.whl → foscat-3.3.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
foscat/CircSpline.py CHANGED
@@ -61,13 +61,57 @@ class CircSpline:
  coefficients[0] = self.cubic_spline_function(0.5 - fractional_part / 2) / 2

  # Assign indices for the support points
- indices[3] = (base_idx + 3)%N
- indices[2] = (base_idx + 2)%N
- indices[1] = (base_idx + 1)%N
- indices[0] = base_idx
+ indices[3] = (base_idx + 2+N)%N
+ indices[2] = (base_idx + 1+N)%N
+ indices[1] = (base_idx + N )%N
+ indices[0] = (base_idx + N-1)%N
+
+ # Square coefficients and normalize
+ coefficients = coefficients * coefficients
+ coefficients /= np.sum(coefficients, axis=0)
+
+ return indices, coefficients
+
+
+ def eval_N(self,x,N):
+ """
+ Compute a 3rd-degree cubic spline with 4-point support.
+
+ Args:
+ x (float or array): Input value(s) to compute the spline.
+
+ Returns:
+ indices (array): Indices of the spline support points.
+ coefficients (array): Normalized spline coefficients.
+ """
+
+ if isinstance(x, float):
+ # Single scalar input
+ base_idx = int(x * (N))
+ indices = np.zeros([4], dtype="int")
+ coefficients = np.zeros([4])
+ else:
+ # Array input
+ base_idx = (x * (N)).astype("int")
+ indices = np.zeros([4, x.shape[0]], dtype="int")
+ coefficients = np.zeros([4, x.shape[0]])
+
+ # Compute the fractional part of the input
+ fractional_part = x * (N) - base_idx
+
+ # Compute spline coefficients for 4 support points
+ coefficients[3] = self.cubic_spline_function(fractional_part / 2) / 2
+ coefficients[2] = self.cubic_spline_function(0.5 + fractional_part / 2) / 2
+ coefficients[1] = self.cubic_spline_function(1 - fractional_part / 2) / 2
+ coefficients[0] = self.cubic_spline_function(0.5 - fractional_part / 2) / 2
+
+ # Assign indices for the support points
+ indices[3] = (base_idx + 2+N)%N
+ indices[2] = (base_idx + 1+N)%N
+ indices[1] = (base_idx + N )%N
+ indices[0] =( base_idx + N-1)%N

  # Adjust indices to start from 0
- indices = indices - 1
  # Square coefficients and normalize
  coefficients = coefficients * coefficients
  coefficients /= np.sum(coefficients, axis=0)
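Note: the change above replaces the old `indices = indices - 1` shift with explicit wrap-around indexing, so the four support points always stay in [0, N) even when `base_idx` sits at the start or end of the circle. A minimal sketch of that periodic indexing, with a hypothetical `cubic_spline_function` standing in for the class method (any smooth bump supported on [0, 1] illustrates the point):

    import numpy as np

    def cubic_spline_function(t):
        # hypothetical stand-in for CircSpline.cubic_spline_function:
        # a smooth bump that vanishes outside [0, 1]
        t = np.clip(t, 0.0, 1.0)
        return (t * (1.0 - t)) ** 2

    def eval_circular(x, N):
        # mirrors the new indexing in eval_N for a scalar x in [0, 1)
        base_idx = int(x * N)
        frac = x * N - base_idx
        coeff = np.array([
            cubic_spline_function(0.5 - frac / 2) / 2,
            cubic_spline_function(1.0 - frac / 2) / 2,
            cubic_spline_function(0.5 + frac / 2) / 2,
            cubic_spline_function(frac / 2) / 2,
        ])
        coeff = coeff ** 2
        coeff /= coeff.sum()
        idx = (base_idx + np.array([N - 1, N, N + 1, N + 2])) % N
        return idx, coeff

    idx, coeff = eval_circular(0.01, 8)
    print(idx)                     # [7 0 1 2] -- the support wraps around the circle
    print(round(coeff.sum(), 6))   # 1.0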
foscat/FoCUS.py CHANGED
@@ -38,7 +38,7 @@ class FoCUS:
  mpi_rank=0,
  ):

- self.__version__ = "3.3.0"
+ self.__version__ = "3.3.6"
  # P00 coeff for normalization for scat_cov
  self.TMPFILE_VERSION = TMPFILE_VERSION
  self.P1_dic = None
foscat/alm.py CHANGED
@@ -3,23 +3,73 @@ import numpy as np

  class alm():

- def __init__(self,backend=None,lmax=24,nside=None,limit_range=1E7):
+ def __init__(self,backend=None,lmax=24,nside=None,limit_range=1E10):
  self._logtab={}
+ self.lth={}
  if nside is not None:
  self.lmax=3*nside
+ th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
+
+ lth=np.unique(th)
+
+ self.lth[nside]=lth
  else:
  self.lmax=lmax
+
  for k in range(1,2*self.lmax+1):
- self._logtab[k]=np.log(k)
+ self._logtab[k]=np.log(k)
+ self._logtab[0]=0.0
  self._limit_range=1/limit_range
  self._log_limit_range=np.log(limit_range)
+
  if backend is None:
  import foscat.scat_cov as sc
  self.sc=sc.funct()
  self.backend=self.sc.backend
  else:
  self.backend=backend.backend
-
+
+ self.Yp={}
+ self.Ym={}
+
+ def ring_th(self,nside):
+ if nside not in self.lth:
+ th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
+
+ lth=np.unique(th)
+
+ self.lth[nside]=lth
+ return self.lth[nside]
+
+
+ def init_Ys(self,s,nside):
+
+ if (s,nside) not in self.Yp:
+ import quaternionic
+ import spherical
+
+ ell_max = 3*nside-1 # Use the largest ℓ value you expect to need
+ wigner = spherical.Wigner(ell_max)
+
+ th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
+
+ lth=self.ring_th(nside)
+
+ R = quaternionic.array.from_spherical_coordinates(lth, 0*lth)
+ self.Yp[s,nside] = {}
+ self.Ym[s,nside] = {}
+ iplus = (wigner.sYlm( s, R)*(4*np.pi/(12*nside**2))).T.real
+ imoins = (wigner.sYlm(-s, R)*(4*np.pi/(12*nside**2))).T.real
+
+ for m in range(ell_max+1):
+ idx=np.array([wigner.Yindex(k, m) for k in range(m,ell_max+1)])
+ self.Yp[s,nside][m] = iplus[idx]
+ self.Ym[s,nside][m] = imoins[idx]
+
+ del(iplus)
+ del(imoins)
+ del(wigner)
+
  def log(self,v):
  #return np.log(v)
  if isinstance(v,np.ndarray):
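The new `init_Ys` above caches spin-weighted spherical harmonics evaluated on one point per HEALPix iso-latitude ring, using the `spherical` and `quaternionic` packages (only `spherical` is declared in the new METADATA; `quaternionic` comes in as its dependency). A small standalone sketch of that precomputation, under the same assumptions:

    import numpy as np
    import healpy as hp
    import quaternionic
    import spherical

    nside, s = 16, 2
    ell_max = 3 * nside - 1

    # colatitude of each iso-latitude ring, as ring_th() builds it
    th, ph = hp.pix2ang(nside, np.arange(12 * nside * nside))
    lth = np.unique(th)                      # 4*nside - 1 rings

    # evaluate sYlm on one point per ring (phi = 0)
    wigner = spherical.Wigner(ell_max)
    R = quaternionic.array.from_spherical_coordinates(lth, 0 * lth)
    Yp = wigner.sYlm(s, R)                   # one row per ring, one column per (l, m)

    # pick out all l >= m for a fixed m, as init_Ys does before caching
    m = 3
    idx = np.array([wigner.Yindex(ell, m) for ell in range(m, ell_max + 1)])
    print(lth.shape, Yp[:, idx].shape)       # (4*nside-1,) and (4*nside-1, ell_max+1-m)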
@@ -41,18 +91,21 @@ class alm():

  # Compute P_{lm}(x) for every l in [m, lmax]
  def compute_legendre_m(self,x,m,lmax):
+ result=np.zeros([lmax-m+1,x.shape[0]])
+ ratio=np.zeros([lmax-m+1,1])
+
+ ratio[0,0] = self.double_factorial_log(2*m - 1)-0.5*np.sum(self.log(1+np.arange(2*m)))
+
  # Step 1: compute P_{mm}(x)
  if m == 0:
  Pmm = 1.0
  else:
- Pmm = (-1)**m * (1 - x**2)**(m/2)
+ #Pmm = (-1)**m * (1 - x**2)**(m/2)
+ Pmm = (0.5-m%2)*2 * (1 - x**2)**(m/2)

- result=np.zeros([lmax-m+1,x.shape[0]])
- ratio=np.zeros([lmax-m+1,1])

  # If l == m, this is directly P_{mm}
- result[0]=Pmm
- ratio[0,0]= self.double_factorial_log(2*m - 1)-0.5*np.sum(self.log(1+np.arange(2*m)))
+ result[0] = Pmm

  if m == lmax:
  return result*np.exp(ratio)*np.sqrt(4*np.pi*(2*(np.arange(lmax-m+1)+m)+1)).reshape(lmax+1-m,1)
@@ -60,19 +113,86 @@ class alm():
  # Step 2: compute P_{l+1, m}(x)
  result[1] = x * (2*m + 1) * result[0]

- ratio[1,0]=ratio[0,0]-0.5*self.log(2*m+1)
+ ratio[1,0] = ratio[0,0]-0.5*self.log(2*m+1)

  # Step 3: recurrence for l > m + 1
  for l in range(m + 2, lmax+1):
- result[l-m] = ((2*l - 1) * x * result[l-m-1] - (l + m - 1) * result[l-m-2]) / (l - m)
+ result[l-m] = ((2*l - 1) * x * result[l-m-1] - (l + m - 1) * result[l-m-2]) / (l - m)
  ratio[l-m,0] = 0.5*self.log(l-m)-0.5*self.log(l+m)+ratio[l-m-1,0]
  if np.max(abs(result[l-m]))>self._limit_range:
- result[l-m-1]*=self._limit_range
- result[l-m]*=self._limit_range
- ratio[l-m-1,0]+=self._log_limit_range
- ratio[l-m,0]+=self._log_limit_range
+ result[l-m-1]*= self._limit_range
+ result[l-m]*= self._limit_range
+ ratio[l-m-1,0]+= self._log_limit_range
+ ratio[l-m,0]+= self._log_limit_range

  return result*np.exp(ratio)*(np.sqrt(4*np.pi*(2*(np.arange(lmax-m+1)+m)+1))).reshape(lmax+1-m,1)
+
+
+ # Compute the spin-weighted s_P_{lm}(x) for every l in [m, lmax]
+ def compute_legendre_spin2_m(self,co_th,si_th,m,lmax):
+ result=np.zeros([lmax-m+2,co_th.shape[0]])
+ ratio =np.zeros([lmax-m+2,1])
+
+ ratio[1,0] = self.double_factorial_log(2*m - 1)-0.5*np.sum(self.log(1+np.arange(2*m)))
+ # Step 1: compute P_{mm}(x)
+ if m == 0:
+ Pmm = 1.0
+ else:
+ #Pmm = (-1)**m * (1 - x**2)**(m/2)
+ Pmm = (0.5-m%2)*2 * (1 - co_th**2)**(m/2)
+
+
+ # If l == m, this is directly P_{mm}
+ result[1] = Pmm
+
+ if m == lmax:
+ ylm=result*np.exp(ratio)
+ ylm[1:]*=(np.sqrt(4*np.pi*(2*(np.arange(lmax-m+1)+m)+1))).reshape(lmax+1-m,1)
+
+ else:
+ # Step 2: compute P_{l+1, m}(x)
+ result[2] = co_th * (2*m + 1) * result[0]
+
+ ratio[2,0] = ratio[1,0]-self.log(2*m+1)/2
+
+ # Step 3: recurrence for l > m + 1
+ for l in range(m + 2, lmax+1):
+ result[l-m+1] = ((2*l - 1) * co_th * result[l-m] - (l + m - 1) * result[l-m-1]) / (l - m)
+ ratio[l-m+1,0] = (self.log(l-m)-self.log(l+m))/2+ratio[l-m,0]
+ if np.max(abs(result[l-m+1]))>self._limit_range:
+ result[l-m]*= self._limit_range
+ result[l-m+1]*= self._limit_range
+ ratio[l-m,0]+= self._log_limit_range
+ ratio[l-m+1,0]+= self._log_limit_range
+
+ ylm=result*np.exp(ratio)
+ ylm[1:]*=(np.sqrt(4*np.pi*(2*(np.arange(lmax-m+1)+m)+1))).reshape(lmax+1-m,1)
+
+ ell=(np.arange(lmax+1-m)+m).reshape(lmax+1-m,1)
+
+ cot_th=co_th/si_th
+ si2_th=si_th*si_th
+
+ a = (2*m**2-ell*(ell+1))/(si2_th.reshape(1,si2_th.shape[0]))+ell*(ell-1)*cot_th*cot_th
+ b = 2*m*(ell-1)*cot_th/si_th
+ w=np.zeros([lmax+1-m,1])
+ l=ell[ell>1]
+ w[ell>1]=np.sqrt(1/((l+2)*(l+1)*(l)*(l-1)))
+ w=w.reshape(lmax+1-m,1)
+
+ alpha_plus=w*(a+b)
+ alpha_moins=w*(a-b)
+
+ a=2*np.sqrt((2*ell+1)/(2*ell-1)*(ell*ell-m*m))
+ b=m/si2_th
+
+ beta_plus=w*a*(cot_th/si_th+b)
+ beta_moins=w*a*(cot_th/si_th-b)
+
+ ylm_plus = alpha_plus*ylm[1:]+ beta_plus*ylm[:-1]
+ ylm_moins = alpha_moins*ylm[1:] + beta_moins*ylm[:-1]
+
+ return ylm_plus,ylm_moins

  def comp_tf(self,im,ph):
  nside=int(np.sqrt(im.shape[0]//12))
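For reference, `compute_legendre_m` above implements the standard associated-Legendre recurrence, seeded at l = m and stepped upward in l; the factor `(0.5 - m%2)*2` is simply \((-1)^m\), while the \((2m-1)!!\) seed, the normalization terms and the overflow rescaling are all carried in log space through `ratio`, so that `result*np.exp(ratio)` is evaluated only once at the end. In formulas:

    P_m^m(x)     = (-1)^m \,(2m-1)!!\,(1-x^2)^{m/2}
    P_{m+1}^m(x) = x\,(2m+1)\,P_m^m(x)
    (\ell-m)\,P_\ell^m(x) = (2\ell-1)\,x\,P_{\ell-1}^m(x) - (\ell+m-1)\,P_{\ell-2}^m(x), \qquad \ell > m+1

The returned array additionally carries the \(\sqrt{4\pi(2\ell+1)}\) factor applied on the method's last line.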
@@ -81,44 +201,115 @@ class alm():
  ft_im=[]
  for k in range(nside-1):
  N=4*(k+1)
- ft_im.append(self.backend.bk_fft(im[n:n+N])[:N//2+1]*np.exp(-1J*np.arange(N//2+1)/N*ph[n]))
- ft_im.append(self.backend.bk_zeros((3*nside-N//2-1),dtype=self.backend.all_cbk_type))
+ l_n=N
+ if l_n>3*nside:
+ l_n=3*nside
+ tmp=self.backend.bk_fft(im[n:n+N])[0:l_n]
+ ft_im.append(tmp*np.exp(-1J*np.arange(l_n)*ph[n]))
+ ft_im.append(self.backend.bk_zeros((3*nside-l_n),dtype=self.backend.all_cbk_type))
+ # if N<3*nside fill the tf with rotational values to mimic alm_tools.F90 of healpix (Minor effect)
+ #for m in range(l_n,3*nside,l_n):
+ # ft_im.append(tmp[0:np.min([3*nside-m,l_n])])
  n+=N
  ii+=1
  for k in range(2*nside+1):
  N=4*nside
- ft_im.append(self.backend.bk_fft(im[n:n+N])[:N//2+1]*np.exp(-1J*np.arange(N//2+1)/N*ph[n]))
- ft_im.append(self.backend.bk_zeros((3*nside-N//2-1),dtype=self.backend.all_cbk_type))
+ ft_im.append(self.backend.bk_fft(im[n:n+N])[:3*nside]*np.exp(-1J*np.arange(3*nside)*ph[n]))
  n+=N
  ii+=1
  for k in range(nside-1):
  N=4*(nside-1-k)
- ft_im.append(self.backend.bk_fft(im[n:n+N])[:N//2+1]*np.exp(-1J*np.arange(N//2+1)/N*ph[n]))
- ft_im.append(self.backend.bk_zeros((3*nside-N//2-1),dtype=self.backend.all_cbk_type))
+ l_n=N
+ if l_n>3*nside:
+ l_n=3*nside
+ tmp=self.backend.bk_fft(im[n:n+N])[0:l_n]
+ ft_im.append(tmp*np.exp(-1J*np.arange(l_n)*ph[n]))
+ ft_im.append(self.backend.bk_zeros((3*nside-l_n),dtype=self.backend.all_cbk_type))
+ # if N<3*nside fill the tf with rotational values to mimic alm_tools.F90 of healpix (Minor effect)
+ #for m in range(l_n,3*nside,l_n):
+ # ft_im.append(tmp[0:np.min([3*nside-m,l_n])])
  n+=N
  ii+=1
  return self.backend.bk_reshape(self.backend.bk_concat(ft_im,axis=0),[4*nside-1,3*nside])

- def anafast(self,im,map2=None,nest=True):
- nside=int(np.sqrt(im.shape[0]//12))
+ def anafast(self,im,map2=None,nest=False):
+ """The `anafast` function computes the L1- and L2-norm power spectra.
+
+ Currently, it is not optimized for single-pass computation due to the relatively inefficient computation of \(Y_{lm}\).
+ Nonetheless, it utilizes TensorFlow and can be integrated into gradient computations.
+
+ Input:
+ - `im`: a vector of size \([12 \times \text{Nside}^2]\) for scalar data, or of size \([3, 12 \times \text{Nside}^2]\) for polarized data.
+ - `map2` (optional): a vector of size \([12 \times \text{Nside}^2]\) for scalar data, or of size
+ \([3, 12 \times \text{Nside}^2]\) for polarized data. If provided, cross power spectra are computed.
+ - `nest` (default `False`): set to `True` if the input maps are in NESTED ordering; they are reordered to RING internally.
+
+ Output:
+ - A tensor of size \([6, l_{\text{max}}+1]\) for polarized input, ordered as TT, EE, BB, TE, EB, TB
+ (a single spectrum for scalar input); both the L2- and L1-norm power spectra are returned.
+
+ """
+ if len(im.shape)==1: # nopol
+ nside=int(np.sqrt(im.shape[0]//12))
+ else:
+ nside=int(np.sqrt(im.shape[1]//12))
  th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
  if nest:
  idx=hp.ring2nest(nside,np.arange(12*nside**2))
- ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im,idx),0*im),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2,idx),0*im),ph)
+ if len(im.shape)==1: # nopol
+ ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im,idx),0*im),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2,idx),0*im),ph)
+ else:
+ ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im[0],idx),0*im[0]),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2[0],idx),0*im[0]),ph)
  else:
- ft_im=self.comp_tf(self.backend.bk_complex(im,0*im),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(map2,0*im),ph)
-
- co_th=np.cos(np.unique(th))
+ if len(im.shape)==1: # nopol
+ ft_im=self.comp_tf(self.backend.bk_complex(im,0*im),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(map2,0*im),ph)
+ else:
+ ft_im=self.comp_tf(self.backend.bk_complex(im[0],0*im[0]),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(map2[0],0*im[0]),ph)
+
+ lth=self.ring_th(nside)

+ co_th=np.cos(lth)
+
  lmax=3*nside-1
-
+
  cl2=None
  cl2_L1=None
+
+
+ if len(im.shape)==2: # pol
+
+ spin=2
+
+ self.init_Ys(spin,nside)
+
+ if nest:
+ idx=hp.ring2nest(nside,np.arange(12*nside**2))
+ l_Q=self.backend.bk_gather(im[1],idx)
+ l_U=self.backend.bk_gather(im[2],idx)
+ ft_im_Pp=self.comp_tf(self.backend.bk_complex(l_Q,l_U),ph)
+ ft_im_Pm=self.comp_tf(self.backend.bk_complex(l_Q,-l_U),ph)
+ if map2 is not None:
+ l_Q=self.backend.bk_gather(map2[1],idx)
+ l_U=self.backend.bk_gather(map2[2],idx)
+ ft_im2_Pp=self.comp_tf(self.backend.bk_complex(l_Q,l_U),ph)
+ ft_im2_Pm=self.comp_tf(self.backend.bk_complex(l_Q,-l_U),ph)
+ else:
+ ft_im_Pp=self.comp_tf(self.backend.bk_complex(im[1],im[2]),ph)
+ ft_im_Pm=self.comp_tf(self.backend.bk_complex(im[1],-im[2]),ph)
+ if map2 is not None:
+ ft_im2_Pp=self.comp_tf(self.backend.bk_complex(map2[1],map2[2]),ph)
+ ft_im2_Pm=self.comp_tf(self.backend.bk_complex(map2[1],-map2[2]),ph)
+
  for m in range(lmax+1):
+
  plm=self.compute_legendre_m(co_th,m,3*nside-1)/(12*nside**2)

  tmp=self.backend.bk_reduce_sum(plm*ft_im[:,m],1)
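As a sanity check on the ring layout that `comp_tf` walks through (polar-cap rings of 4, 8, ..., 4(nside-1) pixels, an equatorial band of 2*nside+1 rings of 4*nside pixels, then the mirrored southern cap), the ring sizes can be recovered from the pixel colatitudes exactly as `ring_th` builds its table. A small check using only healpy and RING ordering:

    import numpy as np
    import healpy as hp

    nside = 8
    th, ph = hp.pix2ang(nside, np.arange(12 * nside * nside))   # RING ordering
    lth, counts = np.unique(th, return_counts=True)             # one entry per iso-latitude ring

    print(len(lth))                  # 31 == 4*nside - 1 rings
    print(counts[:nside - 1])        # [ 4  8 12 16 20 24 28] : N = 4*(k+1) in the first loop
    print(np.all(counts[nside - 1:3 * nside] == 4 * nside))     # True: 2*nside+1 equatorial rings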
@@ -128,16 +319,142 @@ class alm():
  else:
  tmp2=tmp

- tmp=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
+ if len(im.shape)==2: # pol
+ plmp=self.Yp[spin,nside][m]
+ plmm=self.Ym[spin,nside][m]
+
+ tmpp=self.backend.bk_reduce_sum(plmp*ft_im_Pp[:,m],1)
+ tmpm=self.backend.bk_reduce_sum(plmm*ft_im_Pm[:,m],1)
+
+ almE=-(tmpp+tmpm)/2.0
+ almB=(tmpp-tmpm)/(2J)
+
+ if map2 is not None:
+ tmpp2=self.backend.bk_reduce_sum(plmp*ft_im2_Pp[:,m],1)
+ tmpm2=self.backend.bk_reduce_sum(plmm*ft_im2_Pm[:,m],1)
+
+ almE2=-(tmpp2+tmpm2)/2.0
+ almB2=(tmpp2-tmpm2)/(2J)
+ else:
+ almE2=almE
+ almB2=almB
+
+ tmpTT=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
+ tmpEE=self.backend.bk_real((almE*self.backend.bk_conjugate(almE2)))
+ tmpBB=self.backend.bk_real((almB*self.backend.bk_conjugate(almB2)))
+ tmpTE=self.backend.bk_real((tmp*self.backend.bk_conjugate(almE2)))
+ tmpTB=-self.backend.bk_real((tmp*self.backend.bk_conjugate(almB2)))
+ tmpEB=-self.backend.bk_real((almE*self.backend.bk_conjugate(almB2)))
+
+ if map2 is not None:
+ tmpTE=(tmpTE+self.backend.bk_real((tmp2*self.backend.bk_conjugate(almE))))/2
+ tmpTB=(tmpTB-self.backend.bk_real((tmp2*self.backend.bk_conjugate(almB))))/2
+ tmpEB=(tmpEB-self.backend.bk_real((almE2*self.backend.bk_conjugate(almB))))/2
+
+
+ if m==0:
+ l_cl=self.backend.bk_concat([tmpTT,tmpEE,tmpBB,tmpTE,tmpEB,tmpTB],0)
+ else:
+ offset_tensor=self.backend.bk_zeros((m),dtype=self.backend.all_bk_type)
+ l_cl=self.backend.bk_concat([self.backend.bk_concat([offset_tensor,tmpTT],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpEE],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpBB],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpTE],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpEB],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpTB],axis=0)],axis=0)
+
+ l_cl=self.backend.bk_reshape(l_cl,[6,lmax+1])
+ else:
+ tmp=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
+ if m==0:
+ l_cl=tmp
+ else:
+ offset_tensor=self.backend.bk_zeros((m),dtype=self.backend.all_bk_type)
+ l_cl=self.backend.bk_concat([offset_tensor,tmp],axis=0)
+
  if cl2 is None:
- cl2=tmp
- cl2_l1=self.backend.bk_L1(tmp)
+ cl2=l_cl
+ cl2_l1=self.backend.bk_L1(l_cl)
  else:
- tmp=self.backend.bk_concat([self.backend.bk_zeros((m),dtype=self.backend.all_bk_type),tmp],axis=0)
- cl2+=2*tmp
- cl2_l1+=2*self.backend.bk_L1(tmp)
- cl2=cl2*(1+np.clip((np.arange(cl2.shape[0])-2*nside)/(3*nside),0,1))/(2*np.arange(cl2.shape[0])+1)* \
- (1+np.clip((np.arange(cl2.shape[0])-2.4*nside)/(2.5*nside),0,1))
- cl2_l1=cl2_l1*(1+np.clip((np.arange(cl2.shape[0])-2*nside)/(3*nside),0,1))/(2*np.arange(cl2.shape[0])+1)* \
- (1+np.clip((np.arange(cl2.shape[0])-2.4*nside)/(2.5*nside),0,1))
+ cl2+=2*l_cl
+ cl2_l1+=2*self.backend.bk_L1(l_cl)
+
+ if len(im.shape)==1: # nopol
+ cl2=cl2/(2*np.arange(cl2.shape[0])+1)
+ cl2_l1=cl2_l1/(2*np.arange(cl2.shape[0])+1)
+ else:
+ cl2=cl2/np.expand_dims(2*np.arange(cl2.shape[1])+1,0)
+ cl2_l1=cl2_l1/np.expand_dims(2*np.arange(cl2.shape[1])+1,0)
  return cl2,cl2_l1
+
+ def map2alm(self,im,nest=False):
+ nside=int(np.sqrt(im.shape[0]//12))
+ th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
+ if nest:
+ idx=hp.ring2nest(nside,np.arange(12*nside**2))
+ ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im,idx),0*im),ph)
+ else:
+ ft_im=self.comp_tf(self.backend.bk_complex(im,0*im),ph)
+
+ co_th=np.cos(self.ring_th(nside))
+
+ lmax=3*nside-1
+
+ alm=None
+ for m in range(lmax+1):
+ plm=self.compute_legendre_m(co_th,m,3*nside-1)/(12*nside**2)
+
+ tmp=self.backend.bk_reduce_sum(plm*ft_im[:,m],1)
+ if m==0:
+ alm=tmp
+ else:
+ alm=self.backend.bk_concat([alm,tmp],axis=0)
+
+ return alm
+
+ def map2alm_spin(self,im_Q,im_U,spin=2,nest=False):
+
+ if spin==0:
+ return self.map2alm(im_Q,nest=nest),self.map2alm(im_U,nest=nest)
+
+
+ nside=int(np.sqrt(im_Q.shape[0]//12))
+ th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
+
+ self.init_Ys(spin,nside)
+
+ if nest:
+ idx=hp.ring2nest(nside,np.arange(12*nside**2))
+ l_Q=self.backend.bk_gather(im_Q,idx)
+ l_U=self.backend.bk_gather(im_U,idx)
+ ft_im_1=self.comp_tf(self.backend.bk_complex(l_Q,l_U),ph)
+ ft_im_2=self.comp_tf(self.backend.bk_complex(l_Q,-l_U),ph)
+ else:
+ ft_im_1=self.comp_tf(self.backend.bk_complex(im_Q,im_U),ph)
+ ft_im_2=self.comp_tf(self.backend.bk_complex(im_Q,-im_U),ph)
+
+ #co_th=np.cos(self.ring_th[nside])
+ #si_th=np.sin(self.ring_th[nside])
+
+ lmax=3*nside-1
+
+ alm=None
+ for m in range(lmax+1):
+ #not yet debug use spherical
+ #plmp1,plmm1=self.compute_legendre_spin2_m(co_th,si_th,m,3*nside-1)
+ #plmp1/=(12*nside**2)
+ #plmm1/=(12*nside**2)
+
+ plmp=self.Yp[spin,nside][m]
+ plmm=self.Ym[spin,nside][m]
+
+ tmpp=self.backend.bk_reduce_sum(plmp*ft_im_1[:,m],1)
+ tmpm=self.backend.bk_reduce_sum(plmm*ft_im_2[:,m],1)
+ if m==0:
+ almE=-(tmpp+tmpm)/2.0
+ almB=(tmpp-tmpm)/(2J)
+ else:
+ almE=self.backend.bk_concat([almE,-(tmpp+tmpm)/2],axis=0)
+ almB=self.backend.bk_concat([almB,(tmpp-tmpm)/(2J)],axis=0)
+
+ return almE,almB
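To show how the new polarized path is meant to be driven end-to-end, here is a usage sketch (not taken from the package's own documentation; it assumes the default scat_cov backend, a toy T/Q/U map in RING ordering, and the signatures added above):

    import numpy as np
    import foscat.alm as foscat_alm

    nside = 16
    tqu = np.random.randn(3, 12 * nside * nside)   # toy [T, Q, U] map, RING ordering

    a = foscat_alm.alm(nside=nside)

    # L2- and L1-norm spectra; for a [3, 12*nside**2] input each result is
    # stacked as TT, EE, BB, TE, EB, TB along the first axis
    cl_l2, cl_l1 = a.anafast(tqu)

    # spin-2 transform of the polarization part only
    almE, almB = a.map2alm_spin(tqu[1], tqu[2], spin=2)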
foscat/alm_tools.py ADDED
@@ -0,0 +1,192 @@
+ import numpy as np
+
+ #====================================================================================================================
+ # This class is an automatic translation of the Fortran HEALPix software
+ #====================================================================================================================
+
+
+ class alm_tools():
+ def __init__(self):
+ pass
+
+ @staticmethod
+ def gen_recfac(l_max, m):
+ """
+ Generate recursion factors used to compute the Ylm of degree m for all l in m <= l <= l_max.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+ m (int): Degree m.
+
+ Returns:
+ np.ndarray: Recursion factors as a 2D array of shape (2, l_max + 1).
+ """
+ recfac = np.zeros((2, l_max + 1), dtype=np.float64)
+ fm2 = float(m)**2
+
+ for l in range(m, l_max + 1):
+ fl2 = float(l + 1)**2
+ recfac[0, l] = np.sqrt((4.0 * fl2 - 1.0) / (fl2 - fm2))
+
+ recfac[1, m:l_max + 1] = 1.0 / recfac[0, m:l_max + 1]
+
+ return recfac
+
+ @staticmethod
+ def gen_recfac_spin(l_max, m, spin):
+ """
+ Generate recursion factors for spin-weighted spherical harmonics.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+ m (int): Degree m.
+ spin (int): Spin weight.
+
+ Returns:
+ np.ndarray: Recursion factors as a 2D array of shape (2, l_max + 1).
+ """
+ recfac_spin = np.zeros((2, l_max + 1), dtype=np.float64)
+ fm2 = float(m)**2
+ s2 = float(spin)**2
+
+ for l in range(m, l_max + 1):
+ fl2 = float(l + 1)**2
+ recfac_spin[0, l] = np.sqrt((4.0 * fl2 - 1.0) / (fl2 - fm2))
+
+ recfac_spin[1, m:l_max + 1] = (1.0 - s2 / (float(m) + 1.0)**2) / recfac_spin[0, m:l_max + 1]
+
+ return recfac_spin
+
+ @staticmethod
+ def gen_lamfac(l_max):
+ """
+ Generate lambda factors for spherical harmonics.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+
+ Returns:
+ np.ndarray: Lambda factors as a 1D array of size l_max + 1.
+ """
+ lamfac = np.zeros(l_max + 1, dtype=np.float64)
+
+ for l in range(1, l_max + 1):
+ lamfac[l] = np.sqrt(2.0 * l + 1.0)
+
+ return lamfac
+
+ @staticmethod
+ def gen_lamfac_der(l_max):
+ """
+ Generate the derivatives of lambda factors.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+
+ Returns:
+ np.ndarray: Lambda factor derivatives as a 1D array of size l_max + 1.
+ """
+ lamfac_der = np.zeros(l_max + 1, dtype=np.float64)
+
+ for l in range(1, l_max + 1):
+ lamfac_der[l] = (2.0 * l + 1.0) / np.sqrt(2.0 * l + 1.0)
+
+ return lamfac_der
+
+ @staticmethod
+ def gen_mfac(m_max):
+ """
+ Generate m factors for spherical harmonics.
+
+ Parameters:
+ m_max (int): Maximum degree m.
+
+ Returns:
+ np.ndarray: M factors as a 1D array of size m_max + 1.
+ """
+ mfac = np.zeros(m_max + 1, dtype=np.float64)
+
+ for m in range(1, m_max + 1):
+ mfac[m] = np.sqrt(2.0 * m)
+
+ return mfac
+
+ @staticmethod
+ def gen_mfac_spin(m_max, spin):
+ """
+ Generate m factors for spin-weighted spherical harmonics.
+
+ Parameters:
+ m_max (int): Maximum degree m.
+ spin (int): Spin weight.
+
+ Returns:
+ np.ndarray: Spin-weighted m factors as a 1D array of size m_max + 1.
+ """
+ mfac_spin = np.zeros(m_max + 1, dtype=np.float64)
+
+ for m in range(1, m_max + 1):
+ mfac_spin[m] = np.sqrt(2.0 * m) * (1.0 - spin**2 / (m + 1)**2)
+
+ return mfac_spin
+
+ @staticmethod
+ def compute_lam_mm(l_max, m):
+ """
+ Compute lambda values for specific m.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+ m (int): Degree m.
+
+ Returns:
+ np.ndarray: Lambda values as a 1D array of size l_max + 1.
+ """
+ lam_mm = np.zeros(l_max + 1, dtype=np.float64)
+
+ for l in range(m, l_max + 1):
+ lam_mm[l] = (2.0 * l + 1.0) * (1.0 - (m / (l + 1.0))**2)
+
+ return lam_mm
+
+ @staticmethod
+ def do_lam_lm(l_max, m):
+ """
+ Perform computations for lambda values for all l, m.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+ m (int): Degree m.
+
+ Returns:
+ np.ndarray: Computed lambda values as a 2D array of size (l_max + 1, l_max + 1).
+ """
+ lam_lm = np.zeros((l_max + 1, l_max + 1), dtype=np.float64)
+
+ for l in range(m, l_max + 1):
+ for mp in range(m, l + 1):
+ lam_lm[l, mp] = (2.0 * l + 1.0) * (1.0 - (mp / (l + 1.0))**2)
+
+ return lam_lm
+
+ @staticmethod
+ def do_lam_lm_spin(l_max, m, spin):
+ """
+ Perform computations for spin-weighted lambda values for all l, m.
+
+ Parameters:
+ l_max (int): Maximum degree l.
+ m (int): Degree m.
+ spin (int): Spin weight.
+
+ Returns:
+ np.ndarray: Computed spin-weighted lambda values as a 2D array of size (l_max + 1, l_max + 1).
+ """
+ lam_lm_spin = np.zeros((l_max + 1, l_max + 1), dtype=np.float64)
+
+ for l in range(m, l_max + 1):
+ for mp in range(m, l + 1):
+ lam_lm_spin[l, mp] = (2.0 * l + 1.0) * (1.0 - spin**2 / (mp + 1.0)**2)
+
+ return lam_lm_spin
+
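The recursion factors generated above follow the HEALPix alm_tools.F90 convention: for fixed m, rec[0, l] = sqrt((4(l+1)^2 - 1) / ((l+1)^2 - m^2)) and rec[1, l] is its reciprocal. A quick sketch of calling the new helper, assuming nothing beyond the code shown above:

    import numpy as np
    from foscat.alm_tools import alm_tools

    l_max, m = 8, 2
    rec = alm_tools.gen_recfac(l_max, m)

    print(rec.shape)                                   # (2, l_max + 1)
    print(np.isclose(rec[0, m],
                     np.sqrt((4 * (m + 1) ** 2 - 1) / ((m + 1) ** 2 - m ** 2))))  # True
    print(np.allclose(rec[1, m:], 1.0 / rec[0, m:]))   # True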
foscat-3.3.4.dist-info/METADATA → foscat-3.3.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: foscat
- Version: 3.3.4
+ Version: 3.3.6
  Summary: Generate synthetic Healpix or 2D data using Cross Scattering Transform
  Author-email: Jean-Marc DELOUIS <jean.marc.delouis@ifremer.fr>
  Maintainer-email: Theo Foulquier <theo.foulquier@ifremer.fr>
@@ -25,6 +25,7 @@ Requires-Dist: matplotlib
  Requires-Dist: numpy
  Requires-Dist: tensorflow
  Requires-Dist: healpy
+ Requires-Dist: spherical

  # foscat

foscat-3.3.4.dist-info/RECORD → foscat-3.3.6.dist-info/RECORD CHANGED
@@ -1,12 +1,13 @@
  foscat/CNN.py,sha256=j0F2a4Xf3LijhyD_WVZ6Eg_IjGuXw3ddH6Iudj1xVaw,4874
- foscat/CircSpline.py,sha256=fTPOUq5fECevQTKpnwPsYSBTafKyncECUS3fKF03Xls,2423
- foscat/FoCUS.py,sha256=hmHO3cEEaEZyzFEvOIogBlhjUobRfwDbWQTAT1F5r-c,101774
+ foscat/CircSpline.py,sha256=DjP1gy88cnXu2O21ww_lNnsHAHXc3OAWk_8ey84yicg,4053
+ foscat/FoCUS.py,sha256=YcoV2SEtdAHCm52knC8PsC3oxYXtdpqTGQcA_32Wl-Y,101774
  foscat/GCNN.py,sha256=5RV-FKuvqbD-k99TwiM4CttM2LMZE21WD0IK0j5Mkko,7599
  foscat/Softmax.py,sha256=aBLQauoG0q2SJYPotV6U-cxAhsJcspWHNRWdnA_nAiQ,2854
  foscat/Spline1D.py,sha256=a5Jb8I9tb8y20iM8W-z6iZsIqDFByRp6eZdChpmuI5k,3010
  foscat/Synthesis.py,sha256=3_Lq5-gUM-WmO2h15kajMES8XjRo2BGseoxvTLW_xEc,13626
  foscat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- foscat/alm.py,sha256=L29DxMFuKUPk3LO9wVErhDKDqyWUxk5OndyxFHphgps,5597
+ foscat/alm.py,sha256=Ya0TE-KiRzoQD7gCYwnhpB2O7Ku58k0OwAAXstivvCA,18187
+ foscat/alm_tools.py,sha256=zI6r7VWt4oCEFoHyK3uEaIsLqazYfDg6MblsQwzozAs,5402
  foscat/backend.py,sha256=TZnOyPYjqbHjOTkH2Zk0zN5DQf2-WNHlaxenuPo_wO0,38822
  foscat/backend_tens.py,sha256=9Dp136m9frkclkwifJQLLbIpl3ETI3_txdPUZcKfuMw,1618
  foscat/loss_backend_tens.py,sha256=dCOVN6faDtIpN3VO78HTmYP2i5fnFAf-Ddy5qVBlGrM,1783
@@ -19,8 +20,8 @@ foscat/scat_cov1D.py,sha256=XOxsZZ5TYq8f34i2tUgIfzyaqaTDlICB3HzD2l_puro,531
  foscat/scat_cov2D.py,sha256=3gn6xjKvfKsyHJoPfYIu8q9LLVAbU3tsiS2l1LAJ0XM,531
  foscat/scat_cov_map.py,sha256=0wTRo4Nc7rYfI09RI2mh2bYixoukt5lrvAXR6wa9kjA,2744
  foscat/scat_cov_map2D.py,sha256=FqF45FBcoiQbvuVsrLWUIPRUc95GsKsrnH6fKzB3GlE,2841
- foscat-3.3.4.dist-info/LICENCE,sha256=i0ukIr8ZUpkSY2sZaE9XZK-6vuSU5iG6IgX_3pjatP8,1505
- foscat-3.3.4.dist-info/METADATA,sha256=yaXfM1oeDDodXs_qfv1UlHx_Fmd4oeMFk-L3noimUmo,7191
- foscat-3.3.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- foscat-3.3.4.dist-info/top_level.txt,sha256=AGySXBBAlJgb8Tj8af6m_F-aiNg2zNTcybCUPVOKjAg,7
- foscat-3.3.4.dist-info/RECORD,,
+ foscat-3.3.6.dist-info/LICENCE,sha256=i0ukIr8ZUpkSY2sZaE9XZK-6vuSU5iG6IgX_3pjatP8,1505
+ foscat-3.3.6.dist-info/METADATA,sha256=1nIPnSEyqu-Dkei-G86_kGd8c6Y6IqQGYp3a3VolmYk,7216
+ foscat-3.3.6.dist-info/WHEEL,sha256=A3WOREP4zgxI0fKrHUG8DC8013e3dK3n7a6HDbcEIwE,91
+ foscat-3.3.6.dist-info/top_level.txt,sha256=AGySXBBAlJgb8Tj8af6m_F-aiNg2zNTcybCUPVOKjAg,7
+ foscat-3.3.6.dist-info/RECORD,,
foscat-3.3.4.dist-info/WHEEL → foscat-3.3.6.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.42.0)
+ Generator: setuptools (75.7.0)
  Root-Is-Purelib: true
  Tag: py3-none-any