classy_szfast-0.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
classy_szfast/pks_and_sigmas.py ADDED
@@ -0,0 +1,150 @@
+ from .utils import *
+ from .config import *
+ from scipy.interpolate import RectBivariateSpline
+
+
+ # this code is adapted from cobaya
+ class PowerSpectrumInterpolator(RectBivariateSpline):
+     r"""
+     2D spline interpolation object (scipy.interpolate.RectBivariateSpline)
+     to evaluate the matter power spectrum as a function of z and k.
+
+     *This class is adapted from CAMB's own P(k) interpolator, by Antony Lewis;
+     it's mostly interface-compatible with the original.*
+
+     :param z: values of z for which the power spectrum was evaluated.
+     :param k: values of k for which the power spectrum was evaluated.
+     :param P_or_logP: values of the power spectrum (or log-values, if logP=True).
+     :param logP: if True (default: False), the log of the power spectrum is
+         given and used for the underlying interpolator.
+     :param logsign: if logP is True, P_or_logP is log(logsign*Pk).
+     :param extrap_kmin, extrap_kmax: if set, use power-law extrapolation below kmin
+         down to extrap_kmin and beyond kmax up to extrap_kmax; useful for tails of integrals.
+     """
+
+     def __init__(self, z, k, P_or_logP, extrap_kmin=1e-5, extrap_kmax=1e2, logP=False,
+                  logsign=1):
+         self.islog = logP
+         # Check order
+         z, k = (np.atleast_1d(x) for x in [z, k])
+         if len(z) < 4:
+             raise ValueError('Require at least four redshifts for Pk interpolation. '
+                              'Consider using Pk_grid if you just need a small number '
+                              'of specific redshifts (doing 1D splines in k yourself).')
+         z, k, P_or_logP = np.array(z), np.array(k), np.array(P_or_logP)
+         i_z = np.argsort(z)
+         i_k = np.argsort(k)
+         self.logsign = logsign
+         self.z, self.k, P_or_logP = z[i_z], k[i_k], P_or_logP[i_z, :][:, i_k]
+         self.zmin, self.zmax = self.z[0], self.z[-1]
+         self.extrap_kmin, self.extrap_kmax = extrap_kmin, extrap_kmax
+         logk = np.log(self.k)
+         # Start from extrap_kmin using a (log,log)-linear extrapolation
+         if extrap_kmin and extrap_kmin < self.input_kmin:
+             if not logP:
+                 raise ValueError('extrap_kmin must use logP')
+             logk = np.hstack(
+                 [np.log(extrap_kmin),
+                  np.log(self.input_kmin) * 0.1 + np.log(extrap_kmin) * 0.9, logk])
+             logPnew = np.empty((P_or_logP.shape[0], P_or_logP.shape[1] + 2))
+             logPnew[:, 2:] = P_or_logP
+             diff = (logPnew[:, 3] - logPnew[:, 2]) / (logk[3] - logk[2])
+             delta = diff * (logk[2] - logk[0])
+             logPnew[:, 0] = logPnew[:, 2] - delta
+             logPnew[:, 1] = logPnew[:, 2] - delta * 0.9
+             P_or_logP = logPnew
+         # Continue until extrap_kmax using a (log,log)-linear extrapolation
+         if extrap_kmax and extrap_kmax > self.input_kmax:
+             if not logP:
+                 raise ValueError('extrap_kmax must use logP')
+             logk = np.hstack(
+                 [logk, np.log(self.input_kmax) * 0.1 + np.log(extrap_kmax) * 0.9,
+                  np.log(extrap_kmax)])
+             logPnew = np.empty((P_or_logP.shape[0], P_or_logP.shape[1] + 2))
+             logPnew[:, :-2] = P_or_logP
+             diff = (logPnew[:, -3] - logPnew[:, -4]) / (logk[-3] - logk[-4])
+             delta = diff * (logk[-1] - logk[-3])
+             logPnew[:, -1] = logPnew[:, -3] + delta
+             logPnew[:, -2] = logPnew[:, -3] + delta * 0.9
+             P_or_logP = logPnew
+         super().__init__(self.z, logk, P_or_logP)
+
+     @property
+     def input_kmin(self):
+         """Minimum k for the interpolation (not incl. extrapolation range)."""
+         return self.k[0]
+
+     @property
+     def input_kmax(self):
+         """Maximum k for the interpolation (not incl. extrapolation range)."""
+         return self.k[-1]
+
+     @property
+     def kmin(self):
+         """Minimum k of the interpolator (incl. extrapolation range)."""
+         if self.extrap_kmin is None:
+             return self.input_kmin
+         return self.extrap_kmin
+
+     @property
+     def kmax(self):
+         """Maximum k of the interpolator (incl. extrapolation range)."""
+         if self.extrap_kmax is None:
+             return self.input_kmax
+         return self.extrap_kmax
+
+     def check_ranges(self, z, k):
+         """Checks that we are not trying to extrapolate beyond the interpolator limits."""
+         z = np.atleast_1d(z).flatten()
+         min_z, max_z = min(z), max(z)
+         if min_z < self.zmin and not np.allclose(min_z, self.zmin):
+             print(
+                 f"Not possible to extrapolate to z={min(z)} "
+                 f"(minimum z computed is {self.zmin}).")
+         if max_z > self.zmax and not np.allclose(max_z, self.zmax):
+             print(
+                 f"Not possible to extrapolate to z={max(z)} "
+                 f"(maximum z computed is {self.zmax}).")
+         k = np.atleast_1d(k).flatten()
+         min_k, max_k = min(k), max(k)
+         if min_k < self.kmin and not np.allclose(min_k, self.kmin):
+             print(
+                 f"Not possible to extrapolate to k={min(k)} 1/Mpc "
+                 f"(minimum k possible is {self.kmin} 1/Mpc).")
+         if max_k > self.kmax and not np.allclose(max_k, self.kmax):
+             print(
+                 f"Not possible to extrapolate to k={max(k)} 1/Mpc "
+                 f"(maximum k possible is {self.kmax} 1/Mpc).")
+
+     def P(self, z, k, grid=None):
+         """
+         Get the power spectrum at (z,k).
+         """
+         self.check_ranges(z, k)
+         if grid is None:
+             grid = not np.isscalar(z) and not np.isscalar(k)
+         if self.islog:
+             return self.logsign * np.exp(self(z, np.log(k), grid=grid, warn=False))
+         else:
+             return self(z, np.log(k), grid=grid, warn=False)
+
+     def logP(self, z, k, grid=None):
+         """
+         Get the log power spectrum at (z,k) (or minus the log power spectrum,
+         if islog and logsign=-1).
+         """
+         self.check_ranges(z, k)
+         if grid is None:
+             grid = not np.isscalar(z) and not np.isscalar(k)
+         if self.islog:
+             return self(z, np.log(k), grid=grid, warn=False)
+         else:
+             return np.log(self(z, np.log(k), grid=grid, warn=False))
+
+     def __call__(self, *args, warn=True, **kwargs):
+         if warn:
+             print(
+                 "Do not call the instance directly. Use instead methods P(z, k) or "
+                 "logP(z, k) to get the (log) power spectrum. (If you know what you are "
+                 "doing, pass warn=False.)")
+         return super().__call__(*args, **kwargs)
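
A minimal usage sketch for PowerSpectrumInterpolator (illustration only, not part of the package; the z/k grid and the power-law toy spectrum below are invented):

    import numpy as np

    z = np.linspace(0., 3., 8)                     # at least four redshifts are required
    k = np.logspace(-4, 1, 200)                    # wavenumbers in 1/Mpc
    Pk = (1. + z[:, None])**-2 * k[None, :]**-1.5  # toy P(z, k) grid, shape (len(z), len(k))
    interp = PowerSpectrumInterpolator(z, k, np.log(Pk), logP=True)
    print(interp.P(0.5, 0.1))                      # P at z=0.5, k=0.1 1/Mpc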
classy_szfast/restore_nn.py ADDED
@@ -0,0 +1,395 @@
+ from .utils import *
+ from .config import *
+
+
+ from .suppress_warnings import suppress_warnings
+
+ import warnings
+ from contextlib import contextmanager
+ import logging
+
+ # Suppress absl warnings
+ import absl.logging
+ absl.logging.set_verbosity('error')
+ # Suppress TensorFlow warnings
+ import os
+ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+ with suppress_warnings():
+     import tensorflow as tf
+     tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+ dtype = tf.float32
+ import pickle
+
+ class Restore_NN(tf.keras.Model):
+
+     def __init__(self,
+                  parameters=None,
+                  modes=None,
+                  parameters_mean=None,
+                  parameters_std=None,
+                  features_mean=None,
+                  features_std=None,
+                  n_hidden=[512, 512, 512],
+                  restore=False,
+                  restore_filename=None,
+                  trainable=True,
+                  optimizer=None,
+                  verbose=False,
+                  ):
+
+         # super
+         super(Restore_NN, self).__init__()
+
+         # restore the saved attributes (weights, scalings, architecture)
+
+         self.restore(restore_filename)
+
+
+         # input parameters mean and std
+         self.parameters_mean = tf.constant(self.parameters_mean_, dtype=dtype, name='parameters_mean')
+         self.parameters_std = tf.constant(self.parameters_std_, dtype=dtype, name='parameters_std')
+
+         # (log)-spectra mean and std
+         self.features_mean = tf.constant(self.features_mean_, dtype=dtype, name='features_mean')
+         self.features_std = tf.constant(self.features_std_, dtype=dtype, name='features_std')
+
+         # weights, biases and activation function parameters for each layer of the network
+         self.W = []
+         self.b = []
+         self.alphas = []
+         self.betas = []
+         for i in range(self.n_layers):
+             self.W.append(tf.Variable(tf.random.normal([self.architecture[i], self.architecture[i+1]], 0., 1e-3), name="W_" + str(i), trainable=trainable))
+             self.b.append(tf.Variable(tf.zeros([self.architecture[i+1]]), name="b_" + str(i), trainable=trainable))
+         for i in range(self.n_layers-1):
+             self.alphas.append(tf.Variable(tf.random.normal([self.architecture[i+1]]), name="alphas_" + str(i), trainable=trainable))
+             self.betas.append(tf.Variable(tf.random.normal([self.architecture[i+1]]), name="betas_" + str(i), trainable=trainable))
+
+         # overwrite the randomly initialized values with the restored weights
+
+         for i in range(self.n_layers):
+             self.W[i].assign(self.W_[i])
+             self.b[i].assign(self.b_[i])
+         for i in range(self.n_layers-1):
+             self.alphas[i].assign(self.alphas_[i])
+             self.betas[i].assign(self.betas_[i])
+
+         # optimizer
+         self.optimizer = optimizer or tf.keras.optimizers.Adam()
+         self.verbose = verbose
+
+         # print initialization info, if verbose
+         if self.verbose:
+             multiline_str = "\nInitialized cosmopower_NN model, \n" \
+                             f"mapping {self.n_parameters} input parameters to {self.n_modes} output modes, \n" \
+                             f"using {len(self.n_hidden)} hidden layers, \n" \
+                             f"with {list(self.n_hidden)} nodes, respectively. \n"
+             print(multiline_str)
+
+
+     # restore attributes
+     def restore(self,
+                 filename
+                 ):
+         r"""
+         Load pre-trained model
+
+         Parameters:
+             filename (str):
+                 filename tag (without suffix) where model was saved
+         """
+         # load attributes
+         with open(filename + ".pkl", 'rb') as f:
+             self.W_, self.b_, self.alphas_, self.betas_, \
+             self.parameters_mean_, self.parameters_std_, \
+             self.features_mean_, self.features_std_, \
+             self.n_parameters, self.parameters, \
+             self.n_modes, self.modes, \
+             self.n_hidden, self.n_layers, self.architecture = pickle.load(f)
+
+
+
+     # auxiliary function to sort input parameters
+     def dict_to_ordered_arr_np(self,
+                                input_dict,
+                                ):
+         r"""
+         Sort input parameters
+
+         Parameters:
+             input_dict (dict [numpy.ndarray]):
+                 input dict of (arrays of) parameters to be sorted
+
+         Returns:
+             numpy.ndarray:
+                 parameters sorted according to desired order
+         """
+         if self.parameters is not None:
+             return np.stack([input_dict[k] for k in self.parameters], axis=1)
+         else:
+             return np.stack([input_dict[k] for k in input_dict], axis=1)
+
+
+     # forward prediction given input parameters implemented in Numpy
+     def forward_pass_np(self,
+                         parameters_arr
+                         ):
+         r"""
+         Forward pass through the network to predict the output,
+         fully implemented in Numpy
+
+         Parameters:
+             parameters_arr (numpy.ndarray):
+                 array of input parameters
+
+         Returns:
+             numpy.ndarray:
+                 output predictions
+         """
+         # forward pass through the network
+         act = []
+         layers = [(parameters_arr - self.parameters_mean_)/self.parameters_std_]
+         for i in range(self.n_layers-1):
+
+             # linear network operation
+             act.append(np.dot(layers[-1], self.W_[i]) + self.b_[i])
+
+             # pass through activation function
+             layers.append((self.betas_[i] + (1.-self.betas_[i])*1./(1.+np.exp(-self.alphas_[i]*act[-1])))*act[-1])
+
+         # final (linear) layer -> (standardised) predictions
+         layers.append(np.dot(layers[-1], self.W_[-1]) + self.b_[-1])
+
+         # rescale and output
+         return layers[-1]*self.features_std_ + self.features_mean_
+
+
+     # Numpy array predictions
+     def predictions_np(self,
+                        parameters_dict
+                        ):
+         r"""
+         Predictions given input parameters collected in a dict.
+         Fully implemented in Numpy. Calls ``forward_pass_np``
+         after ordering the input parameter dict
+
+         Parameters:
+             parameters_dict (dict [numpy.ndarray]):
+                 dictionary of (arrays of) parameters
+
+         Returns:
+             numpy.ndarray:
+                 output predictions
+         """
+         parameters_arr = self.dict_to_ordered_arr_np(parameters_dict)
+         return self.forward_pass_np(parameters_arr)
+
+
+     # Numpy array 10.**predictions
+     def ten_to_predictions_np(self,
+                               parameters_dict
+                               ):
+         r"""
+         10^predictions given input parameters collected in a dict.
+         Fully implemented in Numpy. It raises 10 to the output
+         from ``forward_pass_np``
+
+         Parameters:
+             parameters_dict (dict [numpy.ndarray]):
+                 dictionary of (arrays of) parameters
+
+         Returns:
+             numpy.ndarray:
+                 10^output predictions
+         """
+         return 10.**self.predictions_np(parameters_dict)
+
+
+ class Restore_PCAplusNN(tf.keras.Model):
+
+     def __init__(self,
+                  cp_pca=None,
+                  n_hidden=[512, 512, 512],
+                  restore=False,
+                  restore_filename=None,
+                  trainable=True,
+                  optimizer=None,
+                  verbose=False,
+                  ):
+         r"""
+         Constructor.
+         """
+         # super
+         super(Restore_PCAplusNN, self).__init__()
+
+         # restore the saved attributes (weights, scalings, PCA, architecture)
+         self.restore(restore_filename)
+
+         # input parameters mean and std
+         self.parameters_mean = tf.constant(self.parameters_mean_, dtype=dtype, name='parameters_mean')
+         self.parameters_std = tf.constant(self.parameters_std_, dtype=dtype, name='parameters_std')
+
+         # PCA mean and std
+         self.pca_mean = tf.constant(self.pca_mean_, dtype=dtype, name='pca_mean')
+         self.pca_std = tf.constant(self.pca_std_, dtype=dtype, name='pca_std')
+
+         # (log)-spectra mean and std
+         self.features_mean = tf.constant(self.features_mean_, dtype=dtype, name='features_mean')
+         self.features_std = tf.constant(self.features_std_, dtype=dtype, name='features_std')
+
+         # pca transform matrix
+         self.pca_transform_matrix = tf.constant(self.pca_transform_matrix_, dtype=dtype, name='pca_transform_matrix')
+
+         # weights, biases and activation function parameters for each layer of the network
+         self.W = []
+         self.b = []
+         self.alphas = []
+         self.betas = []
+         for i in range(self.n_layers):
+             self.W.append(tf.Variable(tf.random.normal([self.architecture[i], self.architecture[i+1]], 0., np.sqrt(2./self.n_parameters)), name="W_" + str(i), trainable=trainable))
+             self.b.append(tf.Variable(tf.zeros([self.architecture[i+1]]), name="b_" + str(i), trainable=trainable))
+         for i in range(self.n_layers-1):
+             self.alphas.append(tf.Variable(tf.random.normal([self.architecture[i+1]]), name="alphas_" + str(i), trainable=trainable))
+             self.betas.append(tf.Variable(tf.random.normal([self.architecture[i+1]]), name="betas_" + str(i), trainable=trainable))
+
+         # overwrite the randomly initialized values with the restored weights
+         for i in range(self.n_layers):
+             self.W[i].assign(self.W_[i])
+             self.b[i].assign(self.b_[i])
+         for i in range(self.n_layers-1):
+             self.alphas[i].assign(self.alphas_[i])
+             self.betas[i].assign(self.betas_[i])
+
+         self.optimizer = optimizer or tf.keras.optimizers.Adam()
+         self.verbose = verbose
+
+         # print initialization info, if verbose
+         if self.verbose:
+             multiline_str = "\nInitialized cosmopower_PCAplusNN model, \n" \
+                             f"mapping {self.n_parameters} input parameters to {self.n_pcas} PCA components \n" \
+                             f"and then inverting the PCA compression to obtain {self.n_modes} modes. \n" \
+                             f"The model uses {len(self.n_hidden)} hidden layers, \n" \
+                             f"with {list(self.n_hidden)} nodes, respectively. \n"
+             print(multiline_str)
+
+
+
+     # restore attributes
+     def restore(self,
+                 filename,
+                 ):
+         r"""
+         Load pre-trained model
+
+         Parameters:
+             filename (str):
+                 filename tag (without suffix) where model was saved
+         """
+         # load attributes
+         with open(filename + ".pkl", 'rb') as f:
+             self.W_, self.b_, self.alphas_, self.betas_, \
+             self.parameters_mean_, self.parameters_std_, \
+             self.pca_mean_, self.pca_std_, \
+             self.features_mean_, self.features_std_, \
+             self.parameters, self.n_parameters, \
+             self.modes, self.n_modes, \
+             self.n_pcas, self.pca_transform_matrix_, \
+             self.n_hidden, self.n_layers, self.architecture = pickle.load(f)
+
+
+
+
+     # auxiliary function to sort input parameters
+     def dict_to_ordered_arr_np(self,
+                                input_dict,
+                                ):
+         r"""
+         Sort input parameters
+
+         Parameters:
+             input_dict (dict [numpy.ndarray]):
+                 input dict of (arrays of) parameters to be sorted
+
+         Returns:
+             numpy.ndarray:
+                 parameters sorted according to desired order
+         """
+         if self.parameters is not None:
+             return np.stack([input_dict[k] for k in self.parameters], axis=1)
+         else:
+             return np.stack([input_dict[k] for k in input_dict], axis=1)
+
+
+     # forward prediction given input parameters implemented in Numpy
+     def forward_pass_np(self,
+                         parameters_arr,
+                         ):
+         r"""
+         Forward pass through the network to predict the output,
+         fully implemented in Numpy
+
+         Parameters:
+             parameters_arr (numpy.ndarray):
+                 array of input parameters
+
+         Returns:
+             numpy.ndarray:
+                 output predictions
+         """
+         # forward pass through the network
+         act = []
+         layers = [(parameters_arr - self.parameters_mean_)/self.parameters_std_]
+         for i in range(self.n_layers-1):
+
+             # linear network operation
+             act.append(np.dot(layers[-1], self.W_[i]) + self.b_[i])
+
+             # pass through activation function
+             layers.append((self.betas_[i] + (1.-self.betas_[i])*1./(1.+np.exp(-self.alphas_[i]*act[-1])))*act[-1])
+
+         # final (linear) layer -> (normalized) PCA coefficients
+         layers.append(np.dot(layers[-1], self.W_[-1]) + self.b_[-1])
+
+         # rescale PCA coefficients, multiply out PCA basis -> normalised (log)-spectrum, shift and re-scale (log)-spectrum -> output (log)-spectrum
+         return np.dot(layers[-1]*self.pca_std_ + self.pca_mean_, self.pca_transform_matrix_)*self.features_std_ + self.features_mean_
+
+
+     def predictions_np(self,
+                        parameters_dict,
+                        ):
+         r"""
+         Predictions given input parameters collected in a dict.
+         Fully implemented in Numpy. Calls ``forward_pass_np``
+         after ordering the input parameter dict
+
+         Parameters:
+             parameters_dict (dict [numpy.ndarray]):
+                 dictionary of (arrays of) parameters
+
+         Returns:
+             numpy.ndarray:
+                 output predictions
+         """
+         parameters_arr = self.dict_to_ordered_arr_np(parameters_dict)
+         return self.forward_pass_np(parameters_arr)
+
+
+     # 10.**predictions
+     def ten_to_predictions_np(self,
+                               parameters_dict,
+                               ):
+         r"""
+         10^predictions given input parameters collected in a dict.
+         Fully implemented in Numpy. It raises 10 to the output
+         from ``forward_pass_np``
+
+         Parameters:
+             parameters_dict (dict [numpy.ndarray]):
+                 dictionary of (arrays of) parameters
+
+         Returns:
+             numpy.ndarray:
+                 10^output predictions
+         """
+         return 10.**self.predictions_np(parameters_dict)
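
A minimal usage sketch for Restore_NN (illustration only; the filename and parameter names are hypothetical — restore() appends ".pkl" to the tag, and the dict keys must match the parameter names stored with the trained model):

    import numpy as np

    nn = Restore_NN(restore_filename='path/to/trained_model')   # loads trained_model.pkl
    params = {name: np.array([value])                           # one 1-D array per model parameter
              for name, value in [('omega_b', 0.02237),
                                  ('omega_cdm', 0.1200),
                                  ('h', 0.6736)]}
    spectra = nn.ten_to_predictions_np(params)                  # 10**(predicted log-spectra)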
classy_szfast/suppress_warnings.py ADDED
@@ -0,0 +1,10 @@
+ import warnings
+ from contextlib import contextmanager
+
+ @contextmanager
+ def suppress_warnings():
+     warnings.filterwarnings("ignore")
+     try:
+         yield
+     finally:
+         warnings.resetwarnings()
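
A minimal usage sketch for suppress_warnings (illustration only). Warnings raised inside the block are ignored; on exit, warnings.resetwarnings() restores the default behaviour — note that it also discards any custom filters installed before entering the block:

    import warnings

    with suppress_warnings():
        warnings.warn("silenced inside the block")
    warnings.warn("emitted normally again")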
classy_szfast/utils.py ADDED
@@ -0,0 +1,62 @@
+ import numpy as np
+ from datetime import datetime
+ import multiprocessing
+ import time
+ import functools
+ import re
+ from pkg_resources import resource_filename
+ import os
+ from scipy import optimize
+ from scipy.integrate import quad
+ from scipy.interpolate import interp1d
+ import math
+ from numpy import linalg as LA
+ import mcfit
+ from mcfit import P2xi
+ # import cosmopower
+ # import classy_sz as csz
+
+
+
+ from scipy.interpolate import LinearNDInterpolator
+ from scipy.interpolate import CloughTocher2DInterpolator
+
+ kb = 1.38064852e-23       # m2 kg s-2 K-1
+ clight = 299792458.       # m/s
+ hplanck = 6.62607004e-34  # m2 kg / s
+ firas_T0 = 2.728          # pivot temperature used in the Max Lkl Analysis
+ firas_T0_bf = 2.725       # best-fitting temperature
+
+ Tcmb_uk = 2.7255e6
+
+ G_newton = 6.674e-11
+ rho_crit_over_h2_in_GeV_per_cm3 = 1.0537e-5
+
+
+ nu_21_cm_in_GHz = 1./21.1*clight*1.e2/1.e9
+ x_21_cm = hplanck*nu_21_cm_in_GHz/kb/firas_T0_bf*1.e9
+
+ kappa_c = 2.1419  # 4*M_2 - 3*M_c; see below eq. 9b of https://arxiv.org/pdf/1506.06582.pdf
+
+ beta_mu = 2.1923
+
+ G1 = np.pi**2./6
+ G2 = 2.4041
+ G3 = np.pi**4/15.
+ a_rho = G2/G3
+ alpha_mu = 2.*G1/3./G2  # = 1/beta_mu = π^2/(18 ζ(3)); see eq. 4.15 of the CUSO lectures.
+
+ z_mu_era = 3e5
+ z_y_era = 5e4
+ z_reio_min = 6
+ z_reio_max = 25
+ z_recombination_min = 800
+ z_recombination_max = 1500
+
+ # Physical constants
+ # ------------------
+ # Light speed
+ class Const:
+     c_km_s = 299792.458      # speed of light
+     h_J_s = 6.626070040e-34  # Planck's constant
+     kB_J_K = 1.38064852e-23  # Boltzmann constant
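
As a quick cross-check of the unit handling above (illustration only): nu_21_cm_in_GHz is clight divided by a 21.1 cm wavelength, converted to GHz, i.e. roughly the 1.42 GHz of the 21 cm line:

    lambda_m = 21.1e-2                       # 21.1 cm in metres
    nu_GHz = 299792458. / lambda_m / 1.e9    # c / lambda, about 1.4208 GHz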
classy_szfast-0.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.1
+ Name: classy_szfast
+ Version: 0.0.0
+ Summary: The accelerator of the class_sz code from https://github.com/CLASS-SZ
+ Maintainer-email: Boris Bolliet <bb667@cam.ac.uk>
+ Project-URL: Homepage, https://github.com/CLASS-SZ
+ Project-URL: GitHub, https://github.com/CLASS-SZ
+ Description-Content-Type: text/markdown
+ Requires-Dist: numpy >=1.19.0
+ Requires-Dist: Cython >=0.29.21
+ Requires-Dist: tensorflow
+ Requires-Dist: cosmopower
+ Requires-Dist: mcfit
+ Requires-Dist: get-cosmopower-emus
+
classy_szfast-0.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,18 @@
+ classy_szfast/__init__.py,sha256=E2thrL0Z9oXFfdzwcsu-xbOytudLFTlRlPqVFGlPPPg,279
+ classy_szfast/classy_sz.py,sha256=QmbwrSXInQLMvCDqsr7KPmtaU0KOiOt1Rb-cTKuulZw,22240
+ classy_szfast/classy_szfast.py,sha256=A06tLt_Slxd5TvSQLHaqmX2-Z0aI6p3nWo6jEQ2oWeM,33748
+ classy_szfast/config.py,sha256=4CvejtLcFOQR30bJ8tlEeBHhu3Rr7LakeLO6dbFgPSU,210
+ classy_szfast/cosmopower.py,sha256=eym72TFAcSJSTUlrwD-sAg8_9e2GdZq0m3lLPQ7uvPU,9858
+ classy_szfast/cosmosis_classy_szfast_interface.py,sha256=zAnxvFtn73a5yS7jgs59zpWFEYKCIQyraYPs5hQ4Le8,11483
+ classy_szfast/pks_and_sigmas.py,sha256=drtuujE1HhlrYY1hY92DyY5lXlYS1uE15MSuVI4uo6k,6625
+ classy_szfast/restore_nn.py,sha256=OyxaRRk9D4hOJTvUSY3c5wAWTPCZJRMxBtin4kq_xd0,14149
+ classy_szfast/suppress_warnings.py,sha256=6wIBml2Sj9DyRGZlZWhuA9hqvpxqrNyYjuz6BPK_a6E,202
+ classy_szfast/utils.py,sha256=VdaRsJK2ttHI9zkyxVhergxHPC6t99usrlycblyqcP8,1464
+ classy_szfast/custom_bias/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ classy_szfast/custom_bias/custom_bias.py,sha256=aR2t5RTIwv7P0m2bsEU0Eq6BTkj4pG10AebH6QpG4qM,486
+ classy_szfast/custom_profiles/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ classy_szfast/custom_profiles/custom_profiles.py,sha256=4LZwb2XoqwCyWNmW2s24Z7AJdmgVdaRG7yYaBYe-d9Q,1188
+ classy_szfast-0.0.0.dist-info/METADATA,sha256=7upoCRlu-ECxV5fe7aZtRlsDxgwVoi3sOFKcRfNjzzA,497
+ classy_szfast-0.0.0.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+ classy_szfast-0.0.0.dist-info/top_level.txt,sha256=hRgqpilUck4lx2KkaWI2y9aCDKqF6pFfGHfNaoPFxv0,14
+ classy_szfast-0.0.0.dist-info/RECORD,,
classy_szfast-0.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (71.1.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
classy_szfast-0.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ classy_szfast