foscat 3.3.5.tar.gz → 3.4.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. {foscat-3.3.5/src/foscat.egg-info → foscat-3.4.0}/PKG-INFO +2 -2
  2. {foscat-3.3.5 → foscat-3.4.0}/pyproject.toml +1 -1
  3. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/FoCUS.py +1 -1
  4. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/alm.py +85 -51
  5. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/backend.py +11 -0
  6. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat_cov.py +70 -25
  7. {foscat-3.3.5 → foscat-3.4.0/src/foscat.egg-info}/PKG-INFO +2 -2
  8. {foscat-3.3.5 → foscat-3.4.0}/LICENCE +0 -0
  9. {foscat-3.3.5 → foscat-3.4.0}/README.md +0 -0
  10. {foscat-3.3.5 → foscat-3.4.0}/setup.cfg +0 -0
  11. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/CNN.py +0 -0
  12. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/CircSpline.py +0 -0
  13. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/GCNN.py +0 -0
  14. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/Softmax.py +0 -0
  15. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/Spline1D.py +0 -0
  16. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/Synthesis.py +0 -0
  17. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/__init__.py +0 -0
  18. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/alm_tools.py +0 -0
  19. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/backend_tens.py +0 -0
  20. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/loss_backend_tens.py +0 -0
  21. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/loss_backend_torch.py +0 -0
  22. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat.py +0 -0
  23. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat1D.py +0 -0
  24. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat2D.py +0 -0
  25. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat_cov1D.py +0 -0
  26. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat_cov2D.py +0 -0
  27. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat_cov_map.py +0 -0
  28. {foscat-3.3.5 → foscat-3.4.0}/src/foscat/scat_cov_map2D.py +0 -0
  29. {foscat-3.3.5 → foscat-3.4.0}/src/foscat.egg-info/SOURCES.txt +0 -0
  30. {foscat-3.3.5 → foscat-3.4.0}/src/foscat.egg-info/dependency_links.txt +0 -0
  31. {foscat-3.3.5 → foscat-3.4.0}/src/foscat.egg-info/requires.txt +0 -0
  32. {foscat-3.3.5 → foscat-3.4.0}/src/foscat.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: foscat
- Version: 3.3.5
+ Version: 3.4.0
  Summary: Generate synthetic Healpix or 2D data using Cross Scattering Transform
  Author-email: Jean-Marc DELOUIS <jean.marc.delouis@ifremer.fr>
  Maintainer-email: Theo Foulquier <theo.foulquier@ifremer.fr>
@@ -1,6 +1,6 @@
  [project]
  name = "foscat"
- version = "3.3.5"
+ version = "3.4.0"
  description = "Generate synthetic Healpix or 2D data using Cross Scattering Transform"
  readme = "README.md"
  license = { text = "BSD-3-Clause" }
@@ -38,7 +38,7 @@ class FoCUS:
  mpi_rank=0,
  ):

- self.__version__ = "3.3.5"
+ self.__version__ = "3.4.0"
  # P00 coeff for normalization for scat_cov
  self.TMPFILE_VERSION = TMPFILE_VERSION
  self.P1_dic = None
@@ -3,7 +3,8 @@ import numpy as np

  class alm():

- def __init__(self,backend=None,lmax=24,nside=None,limit_range=1E10):
+ def __init__(self,backend=None,lmax=24,
+              nside=None,limit_range=1E10):
  self._logtab={}
  self.lth={}
  if nside is not None:
@@ -232,47 +233,57 @@ class alm():
  ii+=1
  return self.backend.bk_reshape(self.backend.bk_concat(ft_im,axis=0),[4*nside-1,3*nside])

- def anafast(self,im,map2=None,nest=False):
+ def anafast(self,im,map2=None,nest=False,spin=2):
+
  """The `anafast` function computes the L1 and L2 norm power spectra.

  Currently, it is not optimized for single-pass computation due to the relatively inefficient computation of \(Y_{lm}\).
  Nonetheless, it utilizes TensorFlow and can be integrated into gradient computations.

  Input:
- - `im`: a vector of size \([12 \times \text{Nside}^2]\) for scalar data, or of size \([3, 12 \times \text{Nside}^2]\) for polar data.
+ - `im`: a vector of size \([12 \times \text{Nside}^2]\) for scalar data, or of size \([2, 12 \times \text{Nside}^2]\) for Q,U polar data,
+   or of size \([3, 12 \times \text{Nside}^2]\) for I,Q,U polar data.
  - `map2` (optional): a vector of size \([12 \times \text{Nside}^2]\) for scalar data, or of size
    \([3, 12 \times \text{Nside}^2]\) for polar data. If provided, cross power spectra will be computed.
  - `nest=True`: alters the ordering of the input maps.
+ - `spin=2` for 1/2 spin data as Q and U. Spin=1 for seep fields

  Output:
  -A tensor of size \([l_{\text{max}} \times (l_{\text{max}}-1)]\) formatted as \([6, \ldots]\),
  ordered as TT, EE, BB, TE, EB.TBanafast function computes L1 and L2 norm powerspctra.

  """
+ doT=True
  if len(im.shape)==1: # nopol
  nside=int(np.sqrt(im.shape[0]//12))
  else:
+ if im.shape[0]==2:
+ doT=False
+
  nside=int(np.sqrt(im.shape[1]//12))
+
  th,ph=hp.pix2ang(nside,np.arange(12*nside*nside))
- if nest:
- idx=hp.ring2nest(nside,np.arange(12*nside**2))
- if len(im.shape)==1: # nopol
- ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im,idx),0*im),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2,idx),0*im),ph)
- else:
- ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im[0],idx),0*im[0]),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2[0],idx),0*im[0]),ph)
- else:
- if len(im.shape)==1: # nopol
- ft_im=self.comp_tf(self.backend.bk_complex(im,0*im),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(map2,0*im),ph)
+
+ if doT: # nopol
+ if nest:
+ idx=hp.ring2nest(nside,np.arange(12*nside**2))
+ if len(im.shape)==1: # nopol
+ ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im,idx),0*im),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2,idx),0*im),ph)
+ else:
+ ft_im=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(im[0],idx),0*im[0]),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(self.backend.bk_gather(map2[0],idx),0*im[0]),ph)
  else:
- ft_im=self.comp_tf(self.backend.bk_complex(im[0],0*im[0]),ph)
- if map2 is not None:
- ft_im2=self.comp_tf(self.backend.bk_complex(map2[0],0*im[0]),ph)
+ if len(im.shape)==1: # nopol
+ ft_im=self.comp_tf(self.backend.bk_complex(im,0*im),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(map2,0*im),ph)
+ else:
+ ft_im=self.comp_tf(self.backend.bk_complex(im[0],0*im[0]),ph)
+ if map2 is not None:
+ ft_im2=self.comp_tf(self.backend.bk_complex(map2[0],0*im[0]),ph)

  lth=self.ring_th(nside)

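The reworked block above derives a `doT` flag from the input shape alone: a 1-D map or a 3-row I,Q,U array keeps the previous behaviour, while a 2-row array is treated as Q,U only and skips the temperature transform. A minimal NumPy sketch of that shape dispatch (the nside arithmetic is copied from the hunk; `detect_components` is an illustrative name, not a foscat function):

```python
import numpy as np

def detect_components(im):
    """Illustrative re-statement of the shape handling added in 3.4.0."""
    im = np.asarray(im)
    if im.ndim == 1:                      # scalar (temperature-only) map
        return True, int(np.sqrt(im.shape[0] // 12))
    doT = im.shape[0] != 2                # two rows -> Q,U only, no T spectra
    return doT, int(np.sqrt(im.shape[1] // 12))

print(detect_components(np.zeros(12 * 16**2)))       # (True, 16)
print(detect_components(np.zeros((2, 12 * 16**2))))  # (False, 16)
print(detect_components(np.zeros((3, 12 * 16**2))))  # (True, 16)
```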
@@ -282,34 +293,40 @@ class alm():

  cl2=None
  cl2_L1=None
-

  if len(im.shape)==2: # nopol
-
- spin=2

  self.init_Ys(spin,nside)

  if nest:
  idx=hp.ring2nest(nside,np.arange(12*nside**2))
- l_Q=self.backend.bk_gather(im[1],idx)
- l_U=self.backend.bk_gather(im[2],idx)
+ l_Q=self.backend.bk_gather(im[int(doT)],idx)
+ l_U=self.backend.bk_gather(im[1+int(doT)],idx)
  ft_im_Pp=self.comp_tf(self.backend.bk_complex(l_Q,l_U),ph)
  ft_im_Pm=self.comp_tf(self.backend.bk_complex(l_Q,-l_U),ph)
+ if map2 is not None:
+ l_Q=self.backend.bk_gather(map2[int(doT)],idx)
+ l_U=self.backend.bk_gather(map2[1+int(doT)],idx)
+ ft_im2_Pp=self.comp_tf(self.backend.bk_complex(l_Q,l_U),ph)
+ ft_im2_Pm=self.comp_tf(self.backend.bk_complex(l_Q,-l_U),ph)
  else:
- ft_im_Pp=self.comp_tf(self.backend.bk_complex(im[1],im[2]),ph)
- ft_im_Pm=self.comp_tf(self.backend.bk_complex(im[1],-im[2]),ph)
+ ft_im_Pp=self.comp_tf(self.backend.bk_complex(im[int(doT)],im[1+int(doT)]),ph)
+ ft_im_Pm=self.comp_tf(self.backend.bk_complex(im[int(doT)],-im[1+int(doT)]),ph)
+ if map2 is not None:
+ ft_im2_Pp=self.comp_tf(self.backend.bk_complex(map2[int(doT)],map2[1+int(doT)]),ph)
+ ft_im2_Pm=self.comp_tf(self.backend.bk_complex(map2[int(doT)],-map2[1+int(doT)]),ph)

  for m in range(lmax+1):

  plm=self.compute_legendre_m(co_th,m,3*nside-1)/(12*nside**2)
-
- tmp=self.backend.bk_reduce_sum(plm*ft_im[:,m],1)
-
- if map2 is not None:
- tmp2=self.backend.bk_reduce_sum(plm*ft_im2[:,m],1)
- else:
- tmp2=tmp
+
+ if doT:
+ tmp=self.backend.bk_reduce_sum(plm*ft_im[:,m],1)
+
+ if map2 is not None:
+ tmp2=self.backend.bk_reduce_sum(plm*ft_im2[:,m],1)
+ else:
+ tmp2=tmp

  if len(im.shape)==2: # pol
  plmp=self.Yp[spin,nside][m]
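Because the temperature row may now be absent, Q and U are no longer hard-coded to rows 1 and 2; the `int(doT)` offset above selects the right rows for both layouts, as this one-liner illustrates:

```python
# Row selection used above: int(doT) skips the temperature row when it exists.
for doT in (True, False):
    q_row, u_row = int(doT), 1 + int(doT)
    print(doT, (q_row, u_row))   # True -> (1, 2) for I,Q,U; False -> (0, 1) for Q,U
```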
@@ -325,35 +342,52 @@ class alm():
  tmpp2=self.backend.bk_reduce_sum(plmp*ft_im2_Pp[:,m],1)
  tmpm2=self.backend.bk_reduce_sum(plmm*ft_im2_Pm[:,m],1)

- almE2=-(tmpp+tmpm)/2.0
- almB2=(tmpp-tmpm)/(2J)
+ almE2=-(tmpp2+tmpm2)/2.0
+ almB2=(tmpp2-tmpm2)/(2J)
  else:
  almE2=almE
  almB2=almB
-
- tmpTT=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
+
+ if doT:
+ tmpTT=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
+ tmpTE=self.backend.bk_real((tmp*self.backend.bk_conjugate(almE2)))
+ tmpTB=-self.backend.bk_real((tmp*self.backend.bk_conjugate(almB2)))
+
  tmpEE=self.backend.bk_real((almE*self.backend.bk_conjugate(almE2)))
  tmpBB=self.backend.bk_real((almB*self.backend.bk_conjugate(almB2)))
- tmpTE=self.backend.bk_real((tmp*self.backend.bk_conjugate(almE2)))
- tmpTB=-self.backend.bk_real((tmp*self.backend.bk_conjugate(almB2)))
  tmpEB=-self.backend.bk_real((almE*self.backend.bk_conjugate(almB2)))
+
  if map2 is not None:
- tmpTE=(tmpTE+self.backend.bk_real((tmp2*self.backend.bk_conjugate(almE))))/2
- tmpTB=(tmpTB-self.backend.bk_real((tmp2*self.backend.bk_conjugate(almB))))/2
  tmpEB=(tmpEB-self.backend.bk_real((almE2*self.backend.bk_conjugate(almB))))/2
+
+ if doT:
+ tmpTE=(tmpTE+self.backend.bk_real((tmp2*self.backend.bk_conjugate(almE))))/2
+ tmpTB=(tmpTB-self.backend.bk_real((tmp2*self.backend.bk_conjugate(almB))))/2
+

  if m==0:
- l_cl=self.backend.bk_concat([tmpTT,tmpEE,tmpBB,tmpTE,tmpEB,tmpTB],0)
+ if doT:
+ l_cl=self.backend.bk_concat([tmpTT,tmpEE,tmpBB,tmpTE,tmpEB,tmpTB],0)
+ else:
+ l_cl=self.backend.bk_concat([tmpEE,tmpBB,tmpEB],0)
  else:
  offset_tensor=self.backend.bk_zeros((m),dtype=self.backend.all_bk_type)
- l_cl=self.backend.bk_concat([self.backend.bk_concat([offset_tensor,tmpTT],axis=0),
- self.backend.bk_concat([offset_tensor,tmpEE],axis=0),
- self.backend.bk_concat([offset_tensor,tmpBB],axis=0),
- self.backend.bk_concat([offset_tensor,tmpTE],axis=0),
- self.backend.bk_concat([offset_tensor,tmpEB],axis=0),
- self.backend.bk_concat([offset_tensor,tmpTB],axis=0)],axis=0)
+ if doT:
+ l_cl=self.backend.bk_concat([self.backend.bk_concat([offset_tensor,tmpTT],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpEE],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpBB],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpTE],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpEB],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpTB],axis=0)],axis=0)
+ else:
+ l_cl=self.backend.bk_concat([self.backend.bk_concat([offset_tensor,tmpEE],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpBB],axis=0),
+ self.backend.bk_concat([offset_tensor,tmpEB],axis=0)],axis=0)

- l_cl=self.backend.bk_reshape(l_cl,[6,lmax+1])
+ if doT:
+ l_cl=self.backend.bk_reshape(l_cl,[6,lmax+1])
+ else:
+ l_cl=self.backend.bk_reshape(l_cl,[3,lmax+1])
  else:
  tmp=self.backend.bk_real((tmp*self.backend.bk_conjugate(tmp2)))
  if m==0:
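Taken together, the anafast changes make the output follow the input: with a temperature row present, six spectra (TT, EE, BB, TE, EB, TB) are concatenated and reshaped to `[6, lmax+1]` as before, while a Q,U-only input yields three spectra (EE, BB, EB) reshaped to `[3, lmax+1]`, and the cross-spectrum path (`map2`) is now wired through the polarized transforms as well. A hedged usage sketch follows; the constructor arguments shown for `foscat_backend` and `alm` are indicative only and are not part of this diff:

```python
import numpy as np
import foscat.backend as bk
import foscat.alm as foscat_alm

# Assumed setup: the exact foscat_backend arguments may differ from the release.
backend = bk.foscat_backend("numpy")
nside, lmax = 16, 24
transform = foscat_alm.alm(backend=backend, lmax=lmax, nside=nside)

iqu = np.random.randn(3, 12 * nside**2)   # I, Q, U map
qu = iqu[1:]                              # Q, U only (supported since 3.4.0)

cl_full = transform.anafast(iqu)            # TT, EE, BB, TE, EB, TB spectra
cl_pol = transform.anafast(qu, spin=2)      # EE, BB, EB spectra only
cl_cross = transform.anafast(iqu, map2=iqu) # cross power spectra of two maps
```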
@@ -590,6 +590,17 @@ class foscat_backend:
  if self.BACKEND == self.NUMPY:
  return np.sum(data, axis)

+ # ---------------------------------------------−---------
+ # return a tensor size
+
+ def bk_size(self, data):
+ if self.BACKEND == self.TENSORFLOW:
+ return self.backend.size(data)
+ if self.BACKEND == self.TORCH:
+ return data.numel()
+ if self.BACKEND == self.NUMPY:
+ return data.size
+
  # ---------------------------------------------−---------

  def iso_mean(self, x, use_2D=False):
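`bk_size` provides the backend-agnostic element count that the reworked `reduce_mean` in scat_cov.py (next hunk) uses for its normalisation. The three branches map onto the usual per-framework calls; a NumPy-only check of the semantics the other backends mirror (`tf.size` and `Tensor.numel()` return the same count as `ndarray.size`):

```python
import numpy as np

# Element count per backend, as dispatched by bk_size:
#   TensorFlow -> tf.size(data), PyTorch -> data.numel(), NumPy -> data.size
x = np.zeros((3, 7, 2))
assert x.size == 3 * 7 * 2   # 42 elements, regardless of shape
```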
@@ -3548,36 +3548,81 @@ class funct(FOC.FoCUS):
  return self.backend.bk_abs(self.backend.bk_sqrt(x))

  def reduce_mean(self, x):
+
  if isinstance(x, scat_cov):
- if x.S1 is None:
- result = (
- self.backend.bk_reduce_mean(self.backend.bk_abs(x.S0))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S2))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S3))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S4))
- ) / 3
- else:
- result = (
- self.backend.bk_reduce_mean(self.backend.bk_abs(x.S0))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S2))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S1))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S3))
- + self.backend.bk_reduce_mean(self.backend.bk_abs(x.S4))
- ) / 4
- else:
- return self.backend.bk_reduce_mean(x)
- return result
+ result = self.backend.bk_reduce_sum(self.backend.bk_abs(x.S0)) + \
+ self.backend.bk_reduce_sum(self.backend.bk_abs(x.S2)) + \
+ self.backend.bk_reduce_sum(self.backend.bk_abs(x.S3)) + \
+ self.backend.bk_reduce_sum(self.backend.bk_abs(x.S4))
+
+ N = self.backend.bk_size(x.S0)+self.backend.bk_size(x.S2)+ \
+ self.backend.bk_size(x.S3)+self.backend.bk_size(x.S4)
+
+ if x.S1 is not None:
+ result = result+self.backend.bk_reduce_sum(self.backend.bk_abs(x.S1))
+ N = N + self.backend.bk_size(x.S1)
+ if x.S3P is not None:
+ result = result+self.backend.bk_reduce_sum(self.backend.bk_abs(x.S3P))
+ N = N + self.backend.bk_size(x.S3P)
+ return result/self.backend.bk_cast(N)
+ else:
+ return self.backend.bk_reduce_mean(x, axis=0)
+

  def reduce_mean_batch(self, x):
+
  if isinstance(x, scat_cov):
- result = scat_cov()
- # Assuming the batch dimension is the first dimension
- result.S0 = self.backend.bk_reduce_mean(x.S0, axis=0)
- result.S2 = self.backend.bk_reduce_mean(x.S2, axis=0)
+
+ sS0=self.backend.bk_reduce_mean(x.S0, axis=0)
+ sS2=self.backend.bk_reduce_mean(x.S2, axis=0)
+ sS3=self.backend.bk_reduce_mean(x.S3, axis=0)
+ sS4=self.backend.bk_reduce_mean(x.S4, axis=0)
+ sS1=None
+ sS3P=None
  if x.S1 is not None:
- result.S1 = self.backend.bk_reduce_mean(x.S1, axis=0)
- result.S3 = self.backend.bk_reduce_mean(x.S3, axis=0)
- result.S4 = self.backend.bk_reduce_mean(x.S4, axis=0)
+ sS1 = self.backend.bk_reduce_mean(x.S1, axis=0)
+ if x.S3P is not None:
+ sS3P = self.backend.bk_reduce_mean(x.S3P, axis=0)
+
+ result = scat_cov(
+ sS0,
+ sS2,
+ sS3,
+ sS4,
+ s1=sS1,
+ s3p=sS3P,
+ backend=self.backend,
+ use_1D=self.use_1D,
+ )
+ return result
+ else:
+ return self.backend.bk_reduce_mean(x, axis=0)
+
+ def reduce_sum_batch(self, x):
+
+ if isinstance(x, scat_cov):
+
+ sS0=self.backend.bk_reduce_sum(x.S0, axis=0)
+ sS2=self.backend.bk_reduce_sum(x.S2, axis=0)
+ sS3=self.backend.bk_reduce_sum(x.S3, axis=0)
+ sS4=self.backend.bk_reduce_sum(x.S4, axis=0)
+ sS1=None
+ sS3P=None
+ if x.S1 is not None:
+ sS1 = self.backend.bk_reduce_sum(x.S1, axis=0)
+ if x.S3P is not None:
+ sS3P = self.backend.bk_reduce_sum(x.S3P, axis=0)
+
+ result = scat_cov(
+ sS0,
+ sS2,
+ sS3,
+ sS4,
+ s1=sS1,
+ s3p=sS3P,
+ backend=self.backend,
+ use_1D=self.use_1D,
+ )
  return result
  else:
  return self.backend.bk_reduce_mean(x, axis=0)
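The rewritten `reduce_mean` now sums `|S|` over every stored coefficient set (S3P included when present) and divides by the total element count obtained through the new `bk_size`, replacing the previous sum of per-set means divided by a fixed 3 or 4. `reduce_sum_batch` is added alongside `reduce_mean_batch`; both now rebuild the result through the `scat_cov` constructor and carry S3P. A small NumPy illustration of the element-weighted mean (array shapes are arbitrary, chosen only for demonstration):

```python
import numpy as np

def weighted_abs_mean(*coeff_sets):
    """Mean of |value| over all elements of all sets, as reduce_mean now computes it."""
    total = sum(np.sum(np.abs(c)) for c in coeff_sets)
    count = sum(c.size for c in coeff_sets)   # what bk_size supplies per set
    return total / count

rng = np.random.default_rng(1)
S0, S2, S3, S4 = (rng.normal(size=s) for s in [(4,), (4, 8), (4, 8, 8), (4, 8, 8, 8)])
print(weighted_abs_mean(S0, S2, S3, S4))      # larger sets now dominate the mean
```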
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: foscat
- Version: 3.3.5
+ Version: 3.4.0
  Summary: Generate synthetic Healpix or 2D data using Cross Scattering Transform
  Author-email: Jean-Marc DELOUIS <jean.marc.delouis@ifremer.fr>
  Maintainer-email: Theo Foulquier <theo.foulquier@ifremer.fr>