ler 0.4.1__py3-none-any.whl → 0.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of ler might be problematic.

Files changed (35)
  1. ler/__init__.py +26 -26
  2. ler/gw_source_population/__init__.py +1 -0
  3. ler/gw_source_population/cbc_source_parameter_distribution.py +1076 -818
  4. ler/gw_source_population/cbc_source_redshift_distribution.py +619 -295
  5. ler/gw_source_population/jit_functions.py +484 -9
  6. ler/gw_source_population/sfr_with_time_delay.py +107 -0
  7. ler/image_properties/image_properties.py +44 -13
  8. ler/image_properties/multiprocessing_routine.py +5 -209
  9. ler/lens_galaxy_population/__init__.py +2 -0
  10. ler/lens_galaxy_population/epl_shear_cross_section.py +0 -0
  11. ler/lens_galaxy_population/jit_functions.py +101 -9
  12. ler/lens_galaxy_population/lens_galaxy_parameter_distribution.py +817 -885
  13. ler/lens_galaxy_population/lens_param_data/density_profile_slope_sl.txt +5000 -0
  14. ler/lens_galaxy_population/lens_param_data/external_shear_sl.txt +2 -0
  15. ler/lens_galaxy_population/lens_param_data/number_density_zl_zs.txt +48 -0
  16. ler/lens_galaxy_population/lens_param_data/optical_depth_epl_shear_vd_ewoud.txt +48 -0
  17. ler/lens_galaxy_population/mp copy.py +554 -0
  18. ler/lens_galaxy_population/mp.py +736 -138
  19. ler/lens_galaxy_population/optical_depth.py +2248 -616
  20. ler/rates/__init__.py +1 -2
  21. ler/rates/gwrates.py +129 -75
  22. ler/rates/ler.py +257 -116
  23. ler/utils/__init__.py +2 -0
  24. ler/utils/function_interpolation.py +322 -0
  25. ler/utils/gwsnr_training_data_generator.py +233 -0
  26. ler/utils/plots.py +1 -1
  27. ler/utils/test.py +1078 -0
  28. ler/utils/utils.py +553 -125
  29. {ler-0.4.1.dist-info → ler-0.4.3.dist-info}/METADATA +22 -9
  30. ler-0.4.3.dist-info/RECORD +34 -0
  31. {ler-0.4.1.dist-info → ler-0.4.3.dist-info}/WHEEL +1 -1
  32. ler/rates/ler copy.py +0 -2097
  33. ler-0.4.1.dist-info/RECORD +0 -25
  34. {ler-0.4.1.dist-info → ler-0.4.3.dist-info/licenses}/LICENSE +0 -0
  35. {ler-0.4.1.dist-info → ler-0.4.3.dist-info}/top_level.txt +0 -0
@@ -5,10 +5,29 @@ This module contains various functions use for simulating GW source population.
 
  import numpy as np
  from numba import njit, jit
+ from scipy.interpolate import CubicSpline
  from astropy.cosmology import LambdaCDM
  cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
+ from scipy.integrate import quad
+ from scipy.optimize import fsolve
 
- from ler.utils import inverse_transform_sampler
+ from ..utils import inverse_transform_sampler, sample_from_powerlaw_distribution
+
+ @njit
+ def cumulative_trapezoid(y, x=None, dx=1.0, initial=0.0):
+     """
+     Compute the cumulative integral of a function using the trapezoidal rule.
+     """
+     if x is None:
+         x = np.arange(len(y)) * dx
+
+     # Calculate the cumulative integral using the trapezoidal rule
+     cumsum = np.zeros_like(y)
+     cumsum[0] = initial
+     for i in range(1, len(y)):
+         cumsum[i] = cumsum[i - 1] + (y[i - 1] + y[i]) * (x[i] - x[i - 1]) / 2.0
+
+     return cumsum
 
  # import pickle
  # # call the interpolator
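The Numba-compiled cumulative_trapezoid above stands in for scipy.integrate.cumulative_trapezoid inside jitted code paths. A minimal sanity check, assuming only numpy and the function as defined in this hunk (illustrative, not part of the package):

    import numpy as np

    x = np.linspace(0.0, np.pi, 1001)
    running = cumulative_trapezoid(np.sin(x), x=x)  # running integral of sin from 0 to x
    print(running[-1])  # ~2.0, the exact value of the integral of sin over [0, pi]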
@@ -30,7 +49,7 @@ def sample_source_redshift(size, zs_inv_cdf=None):
 
  @njit
  def merger_rate_density_bbh_popI_II_oguri2018(
-     zs, R0=23.9 * 1e-9, b2=1.6, b3=2.0, b4=30,
+     zs, R0=23.9 * 1e-9, b2=1.6, b3=2.1, b4=30,
  ):
      """
      Function to compute the merger rate density (PopI/PopII). Reference: Oguri et al. (2018). The output is in detector frame and is unnormalized.
@@ -47,7 +66,7 @@ def merger_rate_density_bbh_popI_II_oguri2018(
          default: 1.6
      b3 : `float`
          Fitting parameters
-         default: 2.0
+         default: 2.1
      b4 : `float`
          Fitting parameters
          default: 30
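Only the default value of b3 changes in this hunk (2.0 -> 2.1); the function body is not shown. As a sketch, assuming the Oguri (2018) fitting form R(zs) = R0 (1 + b4) exp(b2 zs) / (b4 + exp(b3 zs)) that this function implements, the new default mildly suppresses the high-redshift tail:

    import numpy as np

    def oguri2018_rate(zs, R0=23.9e-9, b2=1.6, b3=2.1, b4=30):
        # assumed functional form of merger_rate_density_bbh_popI_II_oguri2018
        return R0 * (b4 + 1) * np.exp(b2 * zs) / (b4 + np.exp(b3 * zs))

    print(oguri2018_rate(5.0, b3=2.0) / oguri2018_rate(5.0))  # ~1.65: the old default gave a higher rate at z = 5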
@@ -107,12 +126,69 @@ def merger_rate_density_bbh_popIII_ken2022(zs, n0=19.2 * 1e-9, aIII=0.66, bIII=0
          / (bIII + aIII * np.exp((aIII + bIII) * (zs - zIII)))
      )
 
+ # @njit
+ def sfr_madau_fragos2017_with_bbh_td(zs, R0=23.9 * 1e-9):
+     """
+     Madau & Fragos (2017) star formation rate with a BBH time delay applied:
+     tabulated rate ratio on a redshift grid, cubic-spline interpolated and scaled by R0.
+     """
+
+     rm = np.array([1.00304765, 1.00370075, 1.00449545, 1.00546251, 1.00663937, 1.00807168, 1.00981505, 1.01193727, 1.01046483, 1.01359803, 1.01741386, 1.02206193, 1.02772495, 1.03462601, 1.04303746, 1.05329142, 1.07093106, 1.08624215, 1.10489848, 1.12760683,1.15519183, 1.18858451, 1.22878158, 1.27676494, 1.33727882, 1.40335222, 1.47956936, 1.56759515, 1.6711375 , 1.79690371, 1.95410462, 2.15201042, 2.36151109, 2.66742932, 3.04354598, 3.49048755, 3.98122536, 4.42347511, 4.61710896, 4.30190679, 3.50890876, 2.37699066, 1.41830834, 0.77944771,0.40667706, 0.20463758, 0.09975143, 0.04745116])
+     zs_ = np.geomspace(0.001, 10, 48)
+
+     spline = CubicSpline(zs_, rm, extrapolate=True)
+     SFR = spline(zs)*R0  # in Mpc^-3 yr^-1
+     return SFR
+
+ # @njit
+ def sfr_madau_dickinson2014_with_bbh_td(zs, R0=23.9 * 1e-9):
+     """
+     Madau & Dickinson (2014) star formation rate with a BBH time delay applied:
+     tabulated rate ratio on a redshift grid, cubic-spline interpolated and scaled by R0.
+     """
+
+     rm = np.array([1.00292325, 1.0035494 , 1.0043112 , 1.00523807, 1.0063658 , 1.00773798, 1.00940767, 1.01143948, 1.00997839, 1.01297699, 1.01662662, 1.02106895, 1.02647649, 1.03305927, 1.04107277, 1.05082743, 1.06802831, 1.08255749, 1.10022606, 1.12169013, 1.14772134, 1.1792097 , 1.21715406, 1.26263694, 1.32051095, 1.38462461, 1.45997648, 1.54851567, 1.65349288, 1.78046645, 1.93811129, 2.1354612 , 2.34086287, 2.63664802, 2.98892341, 3.38353439, 3.76990612, 4.03489696, 4.00806904, 3.56766897, 2.86966689, 2.01282062, 1.29696347, 0.78913584, 0.46166281, 0.26226345, 0.14509118, 0.07854392])
+     zs_ = np.geomspace(0.001, 10, 48)
+
+     spline = CubicSpline(zs_, rm, extrapolate=True)
+     SFR = spline(zs)*R0  # in Mpc^-3 yr^-1
+     return SFR
+
+ # @njit
+ def sfr_madau_fragos2017_with_bns_td(zs, R0=105.5 * 1e-9):
+     """
+     Madau & Fragos (2017) star formation rate with a BNS time delay applied:
+     tabulated rate ratio on a redshift grid, cubic-spline interpolated and scaled by R0.
+     """
+
+     rm = np.array([1.00309364, 1.00375139, 1.00455175, 1.00552568, 1.00671091, 1.00815339, 1.00990912, 1.01204635, 1.00757017, 1.01071962, 1.01455507, 1.01922677, 1.02491815, 1.03185311, 1.04030479, 1.05060602, 1.06970166, 1.08508957, 1.10382838, 1.12661829, 1.15427005, 1.18768774, 1.22781836, 1.27555711, 1.31791484, 1.38209039, 1.4555543 , 1.5397332 , 1.63806934, 1.75685668, 1.90448546, 2.08862044, 2.34440211, 2.63899295, 2.99729389, 3.41567274, 3.86324106, 4.24545603, 4.37018218, 4.00555831, 3.10525751, 2.06354992, 1.20906304, 0.65233811, 0.33356891, 0.16397688, 0.08024945, 0.036953])
+     zs_ = np.geomspace(0.001, 10, 48)
+
+     spline = CubicSpline(zs_, rm, extrapolate=True)
+     SFR = spline(zs)*R0  # in Mpc^-3 yr^-1
+     return SFR
+
+ # @njit
+ def sfr_madau_dickinson2014_with_bns_td(zs, R0=105.5 * 1e-9):
+     """
+     Madau & Dickinson (2014) star formation rate with a BNS time delay applied:
+     tabulated rate ratio on a redshift grid, cubic-spline interpolated and scaled by R0.
+     """
+
+     rm = np.array([1.0029945 , 1.00362259, 1.00438674, 1.00531645, 1.00644763, 1.00782396, 1.00949865, 1.01153645, 1.00240992, 1.00539605, 1.00903013, 1.01345293, 1.01883579, 1.02538714, 1.03336026, 1.04306247, 1.05841698, 1.07283625, 1.09035966, 1.11162909, 1.1373949 , 1.16851509, 1.20594024, 1.25068092, 1.3085267 , 1.37111306, 1.44421094, 1.52948237, 1.62985636, 1.75058453, 1.90010572, 2.0870216 , 2.33573104, 2.6218286 , 2.96031682, 3.3343522 , 3.69149889, 3.92099769, 3.86227814, 3.40811745, 2.59314381, 1.79588097, 1.14260538, 0.686002 , 0.3954134 , 0.22083291, 0.11548455, 0.06064368])
+     zs_ = np.geomspace(0.001, 10, 48)
+
+     spline = CubicSpline(zs_, rm, extrapolate=True)
+     SFR = spline(zs)*R0  # in Mpc^-3 yr^-1
+     return SFR
+
+
+ @njit
+ def sfr_madau_fragos2017(zs, a=0.01, b=2.6, c=3.2, d=6.2):
+     """
+     Madau & Fragos (2017) star formation rate fit. https://arxiv.org/pdf/1606.07887.pdf
+     """
+
+     return a * (1+zs)**b / (1 + ((1+zs)/c)**d)  # [Msun yr-1 Mpc-3]
+
  @njit
- def star_formation_rate_madau_dickinson2014(
-     zs, af=2.7, bf=5.6, cf=2.9,
+ def sfr_madau_dickinson2014(
+     zs, a=0.015, b=2.7, c=2.9, d=5.6,
  ):
      """
-     Function to compute star formation rate as given in Eqn. 15 Madau & Dickinson (2014). The output is in detector frame and is unnormalized.
+     Function to compute the star formation rate as given in Eqn. 15 of Madau & Dickinson (2014). The output is in detector frame and is unnormalized. https://arxiv.org/pdf/1403.0007
 
      Parameters
      ----------
@@ -135,12 +211,12 @@ def star_formation_rate_madau_dickinson2014(
 
      Examples
      ----------
-     >>> from ler.gw_source_population import star_formation_rate_madau_dickinson2014
-     >>> rate_density = star_formation_rate_madau_dickinson2014(zs=0.1)
+     >>> from ler.gw_source_population import sfr_madau_dickinson2014
+     >>> rate_density = sfr_madau_dickinson2014(zs=0.1)
      """
 
      # rate density
-     return 0.015 * (1 + zs) ** af / (1 + ((1 + zs) / cf) ** bf)
+     return a * (1 + zs) ** b / (1 + ((1 + zs) / c) ** d)  # [Msun yr-1 Mpc-3]
 
 
  # @jit
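With the rename, the four fitting constants of Eqn. 15 become keyword arguments; the defaults (a=0.015, b=2.7, c=2.9, d=5.6) reproduce the values that were hard-coded in star_formation_rate_madau_dickinson2014. A worked evaluation near cosmic noon (illustrative):

    a, b, c, d = 0.015, 2.7, 2.9, 5.6
    zs = 2.0
    sfr = a * (1 + zs) ** b / (1 + ((1 + zs) / c) ** d)
    print(sfr)  # ~0.132 Msun yr^-1 Mpc^-3, near the peak of cosmic star formation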
@@ -295,3 +371,402 @@ def inverse_transform_sampler_m1m2(size, inv_cdf, x):
      m1[idx], m2[idx] = m2[idx], m1[idx]
 
      return m1, m2
+
+ @njit
+ def erf(x):
+     # Constants for the approximation
+     p = 0.3275911
+     a1 = 0.254829592
+     a2 = -0.284496736
+     a3 = 1.421413741
+     a4 = -1.453152027
+     a5 = 1.061405429
+
+     # Save the sign of x
+     sign = np.sign(x)
+     x = abs(x)
+
+     # A&S formula 7.1.26 given in Handbook of Mathematical Functions
+     t = 1.0 / (1.0 + p * x)
+     y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * np.exp(-x * x)
+
+     return sign * y
+
+ @njit
+ def compute_normalization_factor(mu, sigma, mmin, mmax):
+     # integral of the unnormalized Gaussian exp(-(m - mu)^2 / (2 sigma^2)) over [mmin, mmax]
+     part1 = (mmax - mu) / (np.sqrt(2) * sigma)
+     part2 = (mmin - mu) / (np.sqrt(2) * sigma)
+     N = np.sqrt(2 * np.pi) * sigma * (0.5 * (erf(part1) - erf(part2)))
+     return N
+
+ @njit
+ def bns_bimodal_pdf(m, w=0.643, muL=1.352, sigmaL=0.08, muR=1.88, sigmaR=0.3, mmin=1.0, mmax=2.3,):
+
+     # left peak
+     pdf_unnormL = np.exp(-((m - muL) ** 2) / (2 * sigmaL**2))
+     normL = compute_normalization_factor(muL, sigmaL, mmin, mmax)  # normalization constant
+     # right peak
+     pdf_unnormR = np.exp(-((m - muR) ** 2) / (2 * sigmaR**2))
+     normR = compute_normalization_factor(muR, sigmaR, mmin, mmax)
+     # total pdf
+     pdf = w * pdf_unnormL / normL + (1 - w) * pdf_unnormR / normR
+
+     return pdf
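Each truncated Gaussian above is divided by its own integral over [mmin, mmax] (via compute_normalization_factor), so the mixture should integrate to one on that interval. A quick numerical check, assuming only the functions in this hunk (illustrative):

    import numpy as np

    m = np.linspace(1.0, 2.3, 10001)
    print(np.trapz(bns_bimodal_pdf(m), m))  # ~1.0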
+
+ @njit
+ def smoothing_S(m, mmin, delta_m, threshold=709.0):
+     s = np.zeros_like(m)
+
+     # Region where smoothing is not needed: m >= mmin + delta_m
+     idx_2 = m >= mmin + delta_m
+     s[idx_2] = 1.0
+
+     # Region where smoothing is applied: mmin <= m < mmin + delta_m
+     idx_1 = (m >= mmin) & (m < mmin + delta_m)
+     mprime = m[idx_1] - mmin
+     exponent = delta_m / mprime + delta_m / (mprime - delta_m)
+
+     # Safe exponentiation only where the exponent is below the overflow threshold
+     safe_idx = exponent <= threshold
+     s_vals = np.zeros_like(mprime)
+     s_vals[safe_idx] = 1.0 / (np.exp(exponent[safe_idx]) + 1.0)
+
+     # Assign back to the main array
+     s[idx_1] = s_vals
+
+     return s
+
+ @njit
+ def powerlaw_with_smoothing(m, mmin, alpha, delta_m):
+     """
+     Power law with the low-mass smoothing applied.
+     """
+     s = smoothing_S(m, mmin, delta_m)
+     return m ** (-alpha) * s
+
+ @njit
+ def inverse_transform_sampler(size, cdf, x):
+     """
+     Sample with the inverse transform method, interpolating linearly between CDF grid points.
+     """
+
+     u = np.random.uniform(0, 1, size)
+     idx = np.searchsorted(cdf, u)
+     x1, x0, y1, y0 = cdf[idx], cdf[idx-1], x[idx], x[idx-1]
+     samples = y0 + (y1 - y0) * (u - x0) / (x1 - x0)
+     return samples
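Note the terse naming in inverse_transform_sampler: x1/x0 hold CDF values and y1/y0 the corresponding abscissae, so each sample is a linear interpolation of x at the drawn quantile. A usage sketch against a distribution with a known mean (illustrative):

    import numpy as np

    x = np.linspace(0.0, 1.0, 1001)
    cdf = x ** 2  # CDF of p(x) = 2x on [0, 1]
    samples = inverse_transform_sampler(100_000, cdf, x)
    print(samples.mean())  # ~0.667, the mean of p(x) = 2x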
+
+ @njit
+ def broken_powerlaw_cdf(size=1000, mminbh=26, mmaxbh=125, alpha_1=6.75, alpha_2=0.0, b=0.5, delta_m=5):
+
+     # find normalization over [mminbh, mmaxbh] with the supplied shape parameters
+     m_try = np.linspace(mminbh, mmaxbh, size)
+     pdf_unnormalized = broken_powerlaw_unormalized(m_try, mminbh=mminbh, mmaxbh=mmaxbh, alpha_1=alpha_1, alpha_2=alpha_2, b=b, delta_m=delta_m)
+     # Compute the CDF using cumulative trapezoid integration
+     cdf_values = cumulative_trapezoid(y=pdf_unnormalized, x=m_try, dx=1.0, initial=0.0)
+     # Normalize the CDF
+     normalization = cdf_values[size-1]
+     cdf_values /= normalization
+
+     return cdf_values
+
+ @njit
+ def sample_broken_powerlaw(size=1000, mminbh=26., mmaxbh=125., alpha_1=6.75, alpha_2=0., b=0.5, delta_m=5., normalization_size=1000):
+     """
+     Generates samples from the broken power-law distribution.
+     """
+     # Generate the CDF
+     cdf_values = broken_powerlaw_cdf(size=normalization_size, mminbh=mminbh, mmaxbh=mmaxbh, alpha_1=alpha_1, alpha_2=alpha_2, b=b, delta_m=delta_m)
+
+     # invert the CDF on its strictly positive part so the interpolation stays well defined
+     x = np.linspace(mminbh, mmaxbh, normalization_size)
+     idx = np.argwhere(cdf_values > 0)[0][0]
+     cdf_values = cdf_values[idx:]
+     x = x[idx:]
+     samples = inverse_transform_sampler(size, cdf_values, x)
+
+     return samples
+
+ @njit
+ def sample_broken_powerlaw_nsbh_masses(size=1000, mminbh=26., mmaxbh=125., alpha_1=6.75, alpha_2=0., b=0.5, delta_m=5., mminns=1.0, mmaxns=3.0, alphans=0.0, normalization_size=1000):
+     """
+     Generates samples from the broken power-law distribution for NSBH masses.
+     """
+     # Sample mass 1 (BH mass)
+     m1_samples = sample_broken_powerlaw(size=size, mminbh=mminbh, mmaxbh=mmaxbh, alpha_1=alpha_1, alpha_2=alpha_2, b=b, delta_m=delta_m, normalization_size=normalization_size)
+
+     # Sample mass 2 (NS mass): inverse transform sampling from a power-law
+     # distribution with a minimum and maximum mass
+     m2_samples = sample_from_powerlaw_distribution(size, alphans, mminns, mmaxns)
+
+     return m1_samples, m2_samples
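A usage sketch for the NSBH sampler above; sample_from_powerlaw_distribution is the helper imported from ..utils at the top of this module, and the defaults pair a broken power-law black-hole primary with a flat (alphans=0) neutron-star secondary (illustrative):

    m1, m2 = sample_broken_powerlaw_nsbh_masses(size=10_000)
    print(m1.min(), m1.max())  # expected within [mminbh, mmaxbh] = [26, 125]
    print(m2.min(), m2.max())  # expected within [mminns, mmaxns] = [1, 3]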
+
+
+ @njit
+ def broken_powerlaw_pdf(m, mminbh=26., mmaxbh=125., alpha_1=6.75, alpha_2=0., b=0.5, delta_m=5., normalization_size=1000):
+     """
+     Normalized PDF for the broken power-law model.
+     """
+     # find normalization over [mminbh, mmaxbh] with the supplied shape parameters
+     m_try = np.linspace(mminbh, mmaxbh, normalization_size)
+     pdf_unnormalized = broken_powerlaw_unormalized(m_try, mminbh=mminbh, mmaxbh=mmaxbh, alpha_1=alpha_1, alpha_2=alpha_2, b=b, delta_m=delta_m)
+     normalization = np.trapz(pdf_unnormalized, m_try)
+
+     # Generate the PDF for the input mass array and normalize it
+     pdf_unnormalized = broken_powerlaw_unormalized(m, mminbh=mminbh, mmaxbh=mmaxbh, alpha_1=alpha_1, alpha_2=alpha_2, b=b, delta_m=delta_m)
+     pdf = pdf_unnormalized / normalization
+
+     return pdf
+
+ @njit
+ def broken_powerlaw_unormalized(m, mminbh=26., mmaxbh=125., alpha_1=6.75, alpha_2=0., b=0.5, delta_m=5.):
+     """
+     Unnormalized probability density for the broken power-law model.
+     """
+     mbreak = mminbh + b * (mmaxbh - mminbh)
+     idx_1 = (m > mminbh) & (m < mbreak)
+     idx_2 = (m >= mbreak) & (m < mmaxbh)
+
+     pdf_unnormalized = np.zeros_like(m)
+     pdf_unnormalized[idx_1] = powerlaw_with_smoothing(m[idx_1], mminbh, alpha_1, delta_m)
+     norm_1 = pdf_unnormalized[idx_1][np.sum(idx_1)-1]
+     pdf_unnormalized[idx_2] = powerlaw_with_smoothing(m[idx_2], mminbh, alpha_2, delta_m)
+     # rescale the second segment so the density is continuous at mbreak
+     norm_2 = pdf_unnormalized[idx_2][0]
+     pdf_unnormalized[idx_2] = pdf_unnormalized[idx_2] * (norm_1 / norm_2)
+
+     return pdf_unnormalized
+
+ @njit
+ def powerlaw_B(m, alpha, mminbh, mmaxbh):
+     """
+     Normalized power-law distribution with spectral index -alpha and cut-offs mminbh, mmaxbh.
+     """
+
+     normalization = (mmaxbh ** (-alpha + 1)) / (-alpha + 1) - (mminbh ** (-alpha + 1)) / (-alpha + 1)
+     pdf = m ** (-alpha) / normalization
+     return pdf
+
+ @njit
+ def gaussian_G(m, mu_g, sigma_g):
+     """
+     Gaussian distribution with mean mu_g and standard deviation sigma_g.
+     """
+     normalization = 1.0 / (sigma_g * np.sqrt(2 * np.pi))
+     exponent = -0.5 * ((m - mu_g) / sigma_g) ** 2
+     pdf = normalization * np.exp(exponent)
+     return pdf
+
+ @njit
+ def powerlaw_gaussian_pdf(m, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m, normalization_size=1000):
+     """
+     Calculate the normalized PDF for the power-law + Gaussian (peak) model.
+     """
+
+     # find normalization
+     m_try = np.linspace(mminbh, mmaxbh, normalization_size)
+     pdf_unnormalized = powerlaw_gaussian_unnormalized(
+         m_try, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m
+     )
+     normalization = np.trapz(pdf_unnormalized, m_try)
+
+     # calculate PDF
+     pdf_unnormalized = powerlaw_gaussian_unnormalized(
+         m, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m
+     )
+     pdf = pdf_unnormalized / normalization
+
+     return pdf
+
+ @njit
+ def powerlaw_gaussian_cdf(size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m):
+     """
+     CDF of the power-law + Gaussian model on a uniform mass grid of length `size`.
+     """
+     # find normalization
+     m_try = np.linspace(mminbh, mmaxbh, size)
+     pdf_unnormalized = powerlaw_gaussian_unnormalized(
+         m_try, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m
+     )
+     # Compute the CDF using cumulative trapezoid integration, then normalize it
+     cdf_values = cumulative_trapezoid(y=pdf_unnormalized, x=m_try, dx=1.0, initial=0.0)
+     normalization = cdf_values[size-1]
+     cdf_values /= normalization
+
+     return cdf_values
+
+ @njit
+ def sample_powerlaw_gaussian(size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m, normalization_size=1000):
+     """
+     Sample from the power-law + Gaussian model.
+     """
+     # Generate the CDF
+     cdf_values = powerlaw_gaussian_cdf(normalization_size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m)
+
+     # invert the CDF on its strictly positive part
+     x = np.linspace(mminbh, mmaxbh, normalization_size)
+     idx = np.argwhere(cdf_values > 0)[0][0]
+     cdf_values = cdf_values[idx:]
+     x = x[idx:]
+     samples = inverse_transform_sampler(size, cdf_values, x)
+
+     return samples
+
+ @njit
+ def sample_powerlaw_gaussian_source_bbh_masses(size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m, beta, normalization_size=1000):
+     """
+     Sample source-frame BBH masses from the power-law + Gaussian model.
+     """
+     # Sample mass 1
+     m1 = sample_powerlaw_gaussian(size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m, normalization_size)
+
+     # Sample the mass ratio q for each m1
+     q = np.zeros(size)
+     for i in range(size):
+         q[i] = sample_mass_ratio(m1[i], mminbh, beta, delta_m)
+
+     # Calculate mass 2
+     m2 = m1 * q
+
+     return m1, m2
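An end-to-end sketch of the power-law + peak sampler above; the parameter values are illustrative (roughly GWTC-3-like), not defaults taken from this release:

    import numpy as np

    m1, m2 = sample_powerlaw_gaussian_source_bbh_masses(
        size=10_000,
        mminbh=4.98, mmaxbh=112.5, alpha=3.78,
        mu_g=32.27, sigma_g=3.88, lambda_peak=0.03,
        delta_m=4.8, beta=0.81,
    )
    print(np.all(m2 <= m1))  # True: m2 = q * m1 with q <= 1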
+
+ @njit
+ def sample_mass_ratio(m1, mminbh, beta, delta_m):
+
+     qmin = mminbh / m1
+     pow_beta = beta + 1.0
+
+     while True:
+         u_q = np.random.rand()
+         # inverse transform sampling for mass ratio q,
+         # where q follows a power-law distribution
+         q = (u_q * (1.0**pow_beta - qmin**pow_beta) + qmin**pow_beta)**(1.0 / pow_beta)
+         m2 = m1 * q
+         # apply the smoothing function to m2 (rejection step)
+         s_m2 = smoothing_S(np.array([m2]), mminbh, delta_m)[0]
+         u_smooth = np.random.rand()
+         if u_smooth < s_m2:
+             break
+
+     return q
+
+ @njit
+ def powerlaw_gaussian_unnormalized(m, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m):
+     """
+     Calculate the unnormalized PDF for the power-law + Gaussian model.
+     """
+
+     pdf_unnormalized = ((1-lambda_peak)*powerlaw_B(m, alpha, mminbh, mmaxbh) + (lambda_peak * gaussian_G(m, mu_g, sigma_g))) * smoothing_S(m, mminbh, delta_m)
+
+     return pdf_unnormalized
+
+
+ # def bns_bimodal_pdf_scipy(m, w=0.643, muL=1.352, sigmaL=0.08, muR=1.88, sigmaR=0.3, mmin=1.0, mmax=2.3,):
+
+ #     mass_arr = np.linspace(mmin, mmax, 1000)
+ #     # left and right peak
+ #     pdf_unnormL = lambda m: np.exp(-((m - muL) ** 2) / (2 * sigmaL**2))
+ #     normL = quad(pdf_unnormL, mmin, mmax)[0]  # normalization constant
+ #     pdf_unnormR = lambda m: np.exp(-((m - muR) ** 2) / (2 * sigmaR**2))
+ #     normR = quad(pdf_unnormR, mmin, mmax)[0]  # normalization constant
+ #     # total pdf
+ #     pdf = w * pdf_unnormL(m) / normL + (1 - w) * pdf_unnormR(m) / normR
+
+ #     return pdf
+
+
+ ###### power-law + Gaussian functions without vectorization ######
+ # @njit
+ # def smoothing_S(m, mmin, delta_m):
+ #     """
+ #     A Numba-jitted smoothing function for the low-mass cutoff.
+ #     """
+ #     if m < mmin:
+ #         return 0.0
+ #     elif m < mmin + delta_m:
+ #         mprime = m - mmin
+ #         # This calculation is safe from division by zero due to the checks above
+ #         exponent = delta_m / mprime + delta_m / (mprime - delta_m)
+
+ #         # Prevent overflow, as learned from the previous step
+ #         if exponent > 709.0:
+ #             return 0.0
+
+ #         f = np.exp(exponent)
+ #         return 1.0 / (f + 1.0)
+ #     else:
+ #         return 1.0
+
+ # @njit
+ # def BBH_powerlaw_gaussian(size, mminbh, mmaxbh, alpha, mu_g, sigma_g, lambda_peak, delta_m, beta):
+ #     """
+ #     Generates samples using a Numba-jitted loop for high performance.
+ #     """
+ #     # Pre-allocate NumPy arrays to store the results. This is much faster
+ #     # than appending to a list in a loop.
+ #     m1_sample = np.empty(size, dtype=np.float64)
+ #     m2_sample = np.empty(size, dtype=np.float64)
+
+ #     accepted_count = 0
+
+ #     # Pre-calculate powers outside the loop
+ #     pow_alpha = 1.0 - alpha
+ #     mmin_pow_alpha = mminbh**pow_alpha
+ #     mmax_pow_alpha = mmaxbh**pow_alpha
+ #     pow_beta = beta + 1.0
+
+ #     # Use a 'while' loop to ensure we collect exactly `size` samples,
+ #     # accounting for samples rejected by the smoothing function.
+ #     while accepted_count < size:
+ #         # 1. Sample m1
+ #         # ----------------
+ #         if np.random.rand() < lambda_peak:
+ #             # Sample from the Gaussian component
+ #             m1_ = np.random.normal(mu_g, sigma_g)
+ #         else:
+ #             # Sample from the power-law component
+ #             # FIX: The original code was missing this random number draw
+ #             u_pl = np.random.rand()
+ #             m1_ = (u_pl * (mmax_pow_alpha - mmin_pow_alpha) + mmin_pow_alpha)**(1.0 / pow_alpha)
+
+ #         # 2. Apply smoothing rejection sampling for m1 and m2
+ #         # ----------------------------------------------------
+ #         # Draw a single random number for the joint smoothing check
+ #         u_smooth = np.random.rand()
+
+ #         s1 = smoothing_S(m1_, mminbh, delta_m)
+ #         if u_smooth > s1:
+ #             continue  # Reject m1 and restart the loop
+
+ #         # If m1 is accepted, sample m2
+ #         qmin = mminbh / m1_
+
+ #         # Rejection sampling loop for m2
+ #         while True:
+ #             u_q = np.random.rand()
+ #             # Inverse transform sampling for mass ratio q
+ #             q = (u_q * (1.0**pow_beta - qmin**pow_beta) + qmin**pow_beta)**(1.0 / pow_beta)
+ #             m2_ = m1_ * q
+
+ #             s_m2 = smoothing_S(m2_, mminbh, delta_m)
+
+ #             # The acceptance of m2 depends on the same random number 'u_smooth'.
+ #             # This logic is preserved from the original code.
+ #             u_smooth = np.random.rand()
+ #             if u_smooth <= s_m2:
+ #                 break  # m2 is accepted, break the inner loop
+
+ #         # 3. Store the accepted sample
+ #         # -----------------------------
+ #         m1_sample[accepted_count] = m1_
+ #         m2_sample[accepted_count] = m2_
+ #         accepted_count += 1
+
+ #     return m1_sample, m2_sample
@@ -0,0 +1,107 @@
+ import numpy as np
+ from scipy.integrate import quad
+ from scipy.optimize import fsolve
+ from astropy.cosmology import LambdaCDM
+ cosmo = LambdaCDM(H0=70, Om0=0.3, Ode0=0.7)
+ # from scipy.interpolate import interp1d
+ from .jit_functions import sfr_madau_fragos2017
+
+ def sfr_with_time_delay(input_args):
+     """
+     Compute the star formation rate at observed redshift z, given fit parameters
+     a, b, c, and d and cosmological parameters H0, Omega_M, and Omega_Lambda.
+     The star formation rate is time-delayed relative to the observed redshift,
+     with the time delay drawn from P(td) proportional to 1/td between td_min
+     and td_max. The delay-to-formation-redshift mapping is computed from the
+     supplied cosmological parameters.
+
+     Parameters
+     ----------
+     input_args : list
+         z : float
+             observed redshift
+         idx : int
+             index of the galaxy
+         td_min : float
+             minimum time delay in Gyr
+         td_max : float
+             maximum time delay in Gyr
+         H0 : float
+             Hubble constant in km/s/Mpc
+         Omega_M : float
+             matter density parameter
+         Omega_Lambda : float
+             dark energy density parameter
+         a : float
+             parameter of the Madau-Fragos star formation rate
+         b : float
+             parameter of the Madau-Fragos star formation rate
+         c : float
+             parameter of the Madau-Fragos star formation rate
+         d : float
+             parameter of the Madau-Fragos star formation rate
+
+     Returns
+     -------
+     idx : int
+         index of the galaxy
+     result : float
+         star formation rate at observed redshift z
+     """
+     z = input_args[0]
+     idx = input_args[1]
+     td_min = input_args[2]
+     td_max = input_args[3]
+     H0 = input_args[4]
+     Omega_M = input_args[5]
+     Omega_Lambda = input_args[6]
+     a = input_args[7]
+     b = input_args[8]
+     c = input_args[9]
+     d = input_args[10]
+
+     def E(z_prime):
+         """Dimensionless Hubble parameter as a function of redshift."""
+         return np.sqrt(Omega_M * (1 + z_prime)**3 + Omega_Lambda)
+
+     def integrand(z_prime):
+         """Lookback-time integrand in Gyr; 977.813 converts Mpc s/km to Gyr."""
+         return 1 / (H0 * (1 + z_prime) * E(z_prime)) * 977.813
+
+     def time_delay(zform, z):
+         """Time delay in Gyr between formation at zform and observation at z."""
+         integral, _ = quad(integrand, z, zform)
+         return integral
+
+     def equation_to_solve(zform, z, td):
+         """Equation whose root is the formation redshift zform."""
+         return td - time_delay(zform, z)
+
+     def find_zform(z, td):
+         """Find zform by numerically solving td = time_delay(zform, z)."""
+         zform_solution = fsolve(equation_to_solve, z, args=(z, td))
+         return zform_solution[0]
+
+     # Monte Carlo integration over the time-delay distribution
+     def integrand_rates(z, size=100000, zform_max=1000.):
+
+         td = np.random.uniform(td_min, td_max, size)
+         td_max_allowed = time_delay(zform_max, z)
+         idx = np.where(td < td_max_allowed)[0]
+         # P(td) proportional to 1/td, normalized on [td_min, td_max];
+         # draws with td >= td_max_allowed keep P_td = 0 and drop out of the sum
+         P_td = np.zeros_like(td)
+         P_td[idx] = 1/(np.log(td_max/td_min) * td[idx])
+
+         zform = np.zeros_like(td)
+         for idx_ in idx:
+             zform[idx_] = find_zform(z, td[idx_])
+
+         psi = sfr_madau_fragos2017(zform, a, b, c, d)
+
+         integral = 1/(td_max - td_min) * np.sum(P_td * psi)
+         return integral
+
+     result = integrand_rates(z)
+     return int(idx), result
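sfr_with_time_delay takes one packed argument list and returns (idx, result), which makes it easy to fan out over a redshift grid with multiprocessing. A minimal sketch with illustrative argument values; the import path follows the file location added in this release:

    from multiprocessing import Pool
    from ler.gw_source_population.sfr_with_time_delay import sfr_with_time_delay

    z_grid = [0.1, 1.0, 3.0]
    # (z, idx, td_min [Gyr], td_max [Gyr], H0, Omega_M, Omega_Lambda, a, b, c, d)
    args = [(z, i, 0.02, 13.0, 70.0, 0.3, 0.7, 0.01, 2.6, 3.2, 6.2) for i, z in enumerate(z_grid)]

    if __name__ == "__main__":
        with Pool(processes=3) as pool:
            for i, rate in pool.map(sfr_with_time_delay, args):
                print(i, rate)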