cosmic-popsynth 3.6.2__cp313-cp313-macosx_14_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1252 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (C) Katelyn Breivik (2017 - 2021)
3
+ #
4
+ # This file is part of cosmic.
5
+ #
6
+ # cosmic is free software: you can redistribute it and/or modify
7
+ # it under the terms of the GNU General Public License as published by
8
+ # the Free Software Foundation, either version 3 of the License, or
9
+ # (at your option) any later version.
10
+ #
11
+ # cosmic is distributed in the hope that it will be useful,
12
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+ # GNU General Public License for more details.
15
+ #
16
+ # You should have received a copy of the GNU General Public License
17
+ # along with cosmic. If not, see <http://www.gnu.org/licenses/>.
18
+
19
+ """`independent`
20
+ """
21
+
22
+ import numpy as np
23
+ import warnings
24
+ import pandas as pd
25
+
26
+ from cosmic import utils
27
+
28
+ from .sampler import register_sampler
29
+ from .. import InitialBinaryTable
30
+
31
+
32
+ __author__ = "Katelyn Breivik <katie.breivik@gmail.com>"
33
+ __credits__ = ("Scott Coughlin <scott.coughlin@ligo.org>, Michael Zevin <michael.j.zevin@gmail.com>, "
34
+ "Tom Wagg <tomjwagg@gmail.com>")
35
+ __all__ = ["get_independent_sampler", "Sample"]
36
+
37
+
38
+ def get_independent_sampler(
39
+ final_kstar1,
40
+ final_kstar2,
41
+ primary_model,
42
+ ecc_model,
43
+ porb_model,
44
+ SF_start,
45
+ SF_duration,
46
+ binfrac_model,
47
+ met,
48
+ size=None,
49
+ total_mass=np.inf,
50
+ sampling_target="size",
51
+ trim_extra_samples=False,
52
+ q_power_law=0,
53
+ **kwargs
54
+ ):
55
+ """Generates an initial binary sample according to user specified models
56
+
57
+ Parameters
58
+ ----------
59
+ final_kstar1 : `int or list`
60
+ Int or list of final kstar1
61
+
62
+ final_kstar2 : `int or list`
63
+ Int or list of final kstar2
64
+
65
+ primary_model : `str`
66
+ Model to sample primary mass; choices include: kroupa93, kroupa01, salpeter55, custom
67
+ if 'custom' is selected, you must also pass the arguments:
68
+ alphas : `array`
69
+ list of power law indices
70
+ mcuts : `array`
71
+ breaks in the power laws.
72
+ e.g. alphas=[-1.3,-2.3,-2.3],mcuts=[0.08,0.5,1.0,150.] reproduces standard Kroupa2001 IMF
73
+
74
+ ecc_model : `str`
75
+ Model to sample eccentricity; choices include: thermal, uniform, sana12
76
+
77
+ porb_model : `str` or `dict`
78
+ Model to sample orbital period; choices include: log_uniform, sana12, renzo19, raghavan10, moe19, martinez26
79
+ or a custom power law distribution defined with a dictionary with keys "min", "max", and "slope"
80
+ (e.g. {"min": 0.15, "max": 0.55, "slope": -0.55}) would reproduce the Sana+2012 distribution
81
+
82
+ qmin : `float`
83
+ kwarg which sets the minimum mass ratio for sampling the secondary
84
+ where the mass ratio distribution is flat in q
85
+ if qmin > 0, qmin sets the minimum mass ratio directly
86
+ if qmin = -1, the minimum mass ratio is set such that
87
+ the pre-MS lifetime of the secondary is not longer than the full
88
+ lifetime of the primary if it were to evolve as a single star.
89
+ Cannot be used in conjunction with m2_min
90
+
91
+ m_max : `float`
92
+ kwarg which sets the maximum primary and secondary mass for sampling
93
+ NOTE: this value changes the range of the IMF and should *not* be used
94
+ as a means of selecting certain kstar types!
95
+
96
+ m1_min : `float`
97
+ kwarg which sets the minimum primary mass for sampling
98
+ NOTE: this value changes the range of the IMF and should *not* be used
99
+ as a means of selecting certain kstar types!
100
+
101
+ m2_min : `float`
102
+ kwarg which sets the minimum secondary mass for sampling
103
+ the secondary as uniform in mass_2 between m2_min and mass_1
104
+ Cannot be used in conjunction with qmin
105
+
106
+ msort : `float`
107
+ Stars with M>msort can have different pairing and sampling of companions
108
+
109
+ qmin_msort : `float`
110
+ Same as qmin for M>msort
111
+
112
+ m2_min_msort : `float`
113
+ Same as m2_min for M>msort
114
+
115
+ SF_start : `float`
116
+ Time in the past when star formation initiates in Myr
117
+
118
+ SF_duration : `float`
119
+ Duration of constant star formation beginning from SF_Start in Myr
120
+
121
+ binfrac_model : `str or float`
122
+ Model for binary fraction; choices include: vanHaaften, offner23, or a fraction where 1.0 is 100% binaries
123
+
124
+ binfrac_model_msort : `str or float`
125
+ Same as binfrac_model for M>msort
126
+
127
+ met : `float`
128
+ Sets the metallicity of the binary population where solar metallicity is zsun
129
+
130
+ size : `int`
131
+ Size of the population to sample
132
+
133
+ total_mass : `float`
134
+ Total mass to use as a target for sampling
135
+
136
+ sampling_target : `str`
137
+ Which type of target to use for sampling (either "size" or "total_mass"), by default "size".
138
+ Note that `total_mass` must not be None when `sampling_target=="total_mass"`.
139
+
140
+ trim_extra_samples : `bool`
141
+ Whether to trim the sampled population so that the total mass sampled is as close as possible to
142
+ `total_mass`. Ignored when `sampling_target == "size"`.
143
+ Note that given the discrete mass of stars, this could mean your sample is off by 300
144
+ solar masses in the worst case scenario (of a 150+150 binary being sampled). In reality the majority
145
+ of cases track the target total mass to within a solar mass.
146
+
147
+ zsun : `float`
148
+ optional kwarg for setting effective radii, default is 0.02
149
+
150
+ q_power_law : `float`
151
+ Exponent for the mass ratio distribution power law, default is 0 (flat in q). Note that
152
+ q_power_law cannot be exactly -1, as this would result in a divergent distribution.
153
+
154
+
155
+ Returns
156
+ -------
157
+ InitialBinaryTable : `pandas.DataFrame`
158
+ DataFrame in the format of the InitialBinaryTable
159
+
160
+ mass_singles : `float`
161
+ Total mass in single stars needed to generate population
162
+
163
+ mass_binaries : `float`
164
+ Total mass in binaries needed to generate population
165
+
166
+ n_singles : `int`
167
+ Number of single stars needed to generate a population
168
+
169
+ n_binaries : `int`
170
+ Number of binaries needed to generate a population
171
+ """
172
+ if sampling_target == "total_mass" and (total_mass is None or total_mass == np.inf):
173
+ raise ValueError("If `sampling_target == 'total mass'` then `total_mass` must be supplied")
174
+ if size is None and (total_mass is None or total_mass == np.inf):
175
+ raise ValueError("Either a sample `size` or `total_mass` must be supplied")
176
+ elif size is None:
177
+ size = int(total_mass)
178
+
179
+ if binfrac_model == 0.0 and sampling_target == "size":
180
+ raise ValueError(("If `binfrac_model == 0.0` then `sampling_target` must be 'total_mass'. Otherwise "
181
+ "you are targetting a population of `size` binaries but will never select any."))
182
+
183
+ # don't allow users to specify both a qmin and m2_min
184
+ if "qmin" in kwargs and "m2_min" in kwargs:
185
+ raise ValueError("You cannot specify both qmin and m2_min, please choose one or the other")
186
+
187
+ final_kstar1 = [final_kstar1] if isinstance(final_kstar1, (int, float)) else final_kstar1
188
+ final_kstar2 = [final_kstar2] if isinstance(final_kstar2, (int, float)) else final_kstar2
189
+ primary_min, primary_max, secondary_min, secondary_max = utils.mass_min_max_select(
190
+ final_kstar1, final_kstar2, **kwargs)
191
+ initconditions = Sample()
192
+
193
+ # set up multiplier if the mass sampling is inefficient
194
+ multiplier = 1
195
+
196
+ # track samples to actually return (after masks)
197
+ mass1_singles = []
198
+ mass1_binary = []
199
+ mass2_binary = []
200
+ binfrac = []
201
+
202
+ # track the total mass of singles and binaries sampled
203
+ m_sampled_singles = 0.0
204
+ m_sampled_binaries = 0.0
205
+
206
+ # track the total number of stars sampled
207
+ n_singles = 0
208
+ n_binaries = 0
209
+
210
+ # if porb_model = `moe19`, the binary fraction is fixed based on the metallicity
211
+ if porb_model == "moe19":
212
+ binfrac_model = utils.get_met_dep_binfrac(met)
213
+ warnings.warn('your supplied binfrac_model has been overwritten to {} to match Moe+2019'.format(binfrac_model))
214
+
215
+ # define a function that evaluates whether you've reached your sampling target
216
+ target = lambda mass1_binary, size, m_sampled_singles, m_sampled_binaries, total_mass:\
217
+ len(mass1_binary) < size if sampling_target == "size" else m_sampled_singles + m_sampled_binaries < total_mass
218
+
219
+ # sample until you've reached your target
220
+ while target(mass1_binary, size, m_sampled_singles, m_sampled_binaries, total_mass):
221
+ # sample primary masses
222
+ mass1, _ = initconditions.sample_primary(primary_model, size=int(size * multiplier), **kwargs)
223
+
224
+ # split them into binaries or single stars
225
+ (mass1_binaries, mass_single, binfrac_binaries, binary_index,
226
+ ) = initconditions.binary_select(mass1, binfrac_model=binfrac_model, **kwargs)
227
+
228
+ # sample secondary masses for the binaries
229
+ mass2_binaries = initconditions.sample_secondary(mass1_binaries, q_power_law=q_power_law, **kwargs)
230
+
231
+ # check if this batch of samples will take us over our sampling target
232
+ if not target(mass1_binary, size,
233
+ m_sampled_singles + np.sum(mass_single),
234
+ m_sampled_binaries + np.sum(mass1_binaries) + np.sum(mass2_binaries),
235
+ total_mass) and trim_extra_samples and sampling_target == "total_mass":
236
+ # get the cumulative total mass of the samples
237
+ total_mass_list = np.copy(mass1)
238
+ total_mass_list[binary_index] += mass2_binaries
239
+ sampled_so_far = m_sampled_singles + m_sampled_binaries
240
+ cumulative_total_mass = sampled_so_far + np.cumsum(total_mass_list)
241
+
242
+ # find the boundary for reaching the right total mass
243
+ threshold_index = np.where(cumulative_total_mass > total_mass)[0][0]
244
+
245
+ keep_offset = abs(cumulative_total_mass[threshold_index] - total_mass)
246
+ drop_offset = abs(cumulative_total_mass[threshold_index - 1] - total_mass)
247
+ lim = threshold_index - 1 if (keep_offset > drop_offset) else threshold_index
248
+
249
+ # work out how many singles vs. binaries to delete
250
+ one_if_binary = np.zeros(len(mass1))
251
+ one_if_binary[binary_index] = 1
252
+ sb_delete = one_if_binary[lim + 1:]
253
+ n_single_delete = (sb_delete == 0).sum()
254
+ n_binary_delete = (sb_delete == 1).sum()
255
+
256
+ # delete em!
257
+ if n_single_delete > 0:
258
+ mass_single = mass_single[:-n_single_delete]
259
+ if n_binary_delete > 0:
260
+ mass1_binaries = mass1_binaries[:-n_binary_delete]
261
+ mass2_binaries = mass2_binaries[:-n_binary_delete]
262
+ binfrac_binaries = binfrac_binaries[:-n_binary_delete]
263
+
264
+ # ensure we don't loop again after this
265
+ target = lambda mass1_binary, size, m_sampled_singles, m_sampled_binaries, total_mass: False
266
+
267
+ # track the mass sampled
268
+ m_sampled_singles += sum(mass_single)
269
+ m_sampled_binaries += sum(mass1_binaries)
270
+ m_sampled_binaries += sum(mass2_binaries)
271
+
272
+ # track the total number sampled
273
+ n_singles += len(mass_single)
274
+ n_binaries += len(mass1_binaries)
275
+
276
+ # select out the primaries and secondaries that will produce the final kstars
277
+ ind_select = ( (mass1_binaries > primary_min)
278
+ & (mass1_binaries < primary_max)
279
+ & (mass2_binaries > secondary_min)
280
+ & (mass2_binaries < secondary_max))
281
+ mass1_binary.extend(mass1_binaries[ind_select])
282
+ mass2_binary.extend(mass2_binaries[ind_select])
283
+ binfrac.extend(binfrac_binaries[ind_select])
284
+
285
+ # select out the single stars that will produce the final kstar
286
+ mass1_singles.extend(mass_single[(mass_single > primary_min) & (mass_single < primary_max)])
287
+
288
+ # check to see if we should increase the multiplier factor to sample the population more quickly
289
+ if target(mass1_binary, size / 100, m_sampled_singles, m_sampled_binaries, total_mass / 100):
290
+ # well this sampling rate is clearly not working time to increase
291
+ # the multiplier by an order of magnitude
292
+ multiplier *= 10
293
+
294
+ mass1_binary = np.array(mass1_binary)
295
+ mass2_binary = np.array(mass2_binary)
296
+ binfrac = np.asarray(binfrac)
297
+ mass1_singles = np.asarray(mass1_singles)
298
+
299
+ zsun = kwargs.pop("zsun", 0.02)
300
+
301
+ rad1 = initconditions.set_reff(mass1_binary, metallicity=met, zsun=zsun)
302
+ rad2 = initconditions.set_reff(mass2_binary, metallicity=met, zsun=zsun)
303
+
304
+ # sample periods and eccentricities
305
+ # if the porb_model is moe19, the metallicity needs to be supplied
306
+ if porb_model == "moe19":
307
+ porb,aRL_over_a = initconditions.sample_porb(
308
+ mass1_binary, mass2_binary, rad1, rad2, porb_model, met=met, size=mass1_binary.size
309
+ )
310
+ else:
311
+ porb,aRL_over_a = initconditions.sample_porb(
312
+ mass1_binary, mass2_binary, rad1, rad2, porb_model, size=mass1_binary.size
313
+ )
314
+ ecc = initconditions.sample_ecc(aRL_over_a, ecc_model, size=mass1_binary.size)
315
+
316
+ tphysf, metallicity = initconditions.sample_SFH(
317
+ SF_start=SF_start, SF_duration=SF_duration, met=met, size=mass1_binary.size
318
+ )
319
+ metallicity[metallicity < 1e-4] = 1e-4
320
+ metallicity[metallicity > 0.03] = 0.03
321
+ kstar1 = initconditions.set_kstar(mass1_binary)
322
+ kstar2 = initconditions.set_kstar(mass2_binary)
323
+
324
+ if kwargs.pop("keep_singles", False):
325
+ binary_table = InitialBinaryTable.InitialBinaries(
326
+ mass1_binary,
327
+ mass2_binary,
328
+ porb,
329
+ ecc,
330
+ tphysf,
331
+ kstar1,
332
+ kstar2,
333
+ metallicity,
334
+ binfrac=binfrac,
335
+ )
336
+ tphysf_singles, metallicity_singles = initconditions.sample_SFH(
337
+ SF_start=SF_start, SF_duration=SF_duration, met=met, size=mass1_singles.size
338
+ )
339
+ metallicity_singles[metallicity_singles < 1e-4] = 1e-4
340
+ metallicity_singles[metallicity_singles > 0.03] = 0.03
341
+ kstar1_singles = initconditions.set_kstar(mass1_singles)
342
+ singles_table = InitialBinaryTable.InitialBinaries(
343
+ mass1_singles, # mass1
344
+ np.ones_like(mass1_singles) * 0, # mass2 (all massless remnants)
345
+ np.ones_like(mass1_singles) * -1, # porb (single not binary)
346
+ np.ones_like(mass1_singles) * -1, # ecc (single not binary)
347
+ tphysf_singles, # tphysf
348
+ kstar1_singles, # kstar1
349
+ np.ones_like(mass1_singles) * 15, # kstar2 (all massless remnants)
350
+ metallicity_singles, # metallicity
351
+ )
352
+ binary_table = pd.concat([binary_table, singles_table], ignore_index=True)
353
+ else:
354
+ binary_table = InitialBinaryTable.InitialBinaries(
355
+ mass1_binary,
356
+ mass2_binary,
357
+ porb,
358
+ ecc,
359
+ tphysf,
360
+ kstar1,
361
+ kstar2,
362
+ metallicity,
363
+ binfrac=binfrac,
364
+ )
365
+
366
+ return (
367
+ binary_table,
368
+ m_sampled_singles,
369
+ m_sampled_binaries,
370
+ n_singles,
371
+ n_binaries
372
+ )
373
+
374
+
375
+ register_sampler(
376
+ "independent",
377
+ InitialBinaryTable,
378
+ get_independent_sampler,
379
+ usage="final_kstar1, final_kstar2, binfrac_model, primary_model, ecc_model, SFH_model, component_age, metallicity, size",
380
+ )
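+ # Illustrative usage sketch (not part of the module): how the registered 'independent'
+ # sampler might be invoked through InitialBinaryTable.sampler. The import path and keyword
+ # set shown here are assumptions based on the get_independent_sampler signature above;
+ # consult the cosmic documentation for the authoritative call.
+ #
+ # from cosmic.sample.initialbinarytable import InitialBinaryTable
+ # final_kstars = [10, 11, 12]  # target white-dwarf final types
+ # binaries, m_singles, m_binaries, n_singles, n_binaries = InitialBinaryTable.sampler(
+ #     'independent', final_kstars, final_kstars,
+ #     primary_model='kroupa01', ecc_model='sana12', porb_model='sana12',
+ #     binfrac_model=0.5, qmin=-1, SF_start=13700.0, SF_duration=0.0,
+ #     met=0.02, size=10000)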
381
+
382
+
383
+ class Sample(object):
384
+ # sample primary masses
385
+ def sample_primary(self, primary_model='kroupa01', size=None, **kwargs):
386
+ """Sample the primary mass (always the most massive star) from a user-selected model
387
+
388
+ kroupa93 follows Kroupa (1993), normalization comes from
389
+ `Hurley 2002 <https://arxiv.org/abs/astro-ph/0201220>`_
390
+ between 0.08 and 150 Msun
391
+ salpeter55 follows
392
+ `Salpeter (1955) <http://adsabs.harvard.edu/abs/1955ApJ...121..161S>`_
393
+ between 0.08 and 150 Msun
394
+ kroupa01 follows `Kroupa (2001) <https://arxiv.org/abs/astro-ph/0009005>`_
395
+ between 0.08 and 100 Msun
396
+
397
+
398
+ Parameters
399
+ ----------
400
+ primary_model : str, optional
401
+ model for mass distribution; choose from:
402
+
403
+ kroupa93 follows Kroupa (1993), normalization comes from
404
+ `Hurley 2002 <https://arxiv.org/abs/astro-ph/0201220>`_
405
+ valid for masses between 0.1 and 100 Msun
406
+
407
+ salpeter55 follows
408
+ `Salpeter (1955) <http://adsabs.harvard.edu/abs/1955ApJ...121..161S>`_
409
+ valid for masses between 0.1 and 100 Msun
410
+
411
+ kroupa01 follows Kroupa (2001), normalization comes from
412
+ `Hurley 2002 <https://arxiv.org/abs/astro-ph/0201220>`_
413
+ valid for masses between 0.1 and 100 Msun
414
+
415
+ custom is a generic piecewise power law that takes in the power
416
+ law slopes and break points given in the optional input lists (alphas, mcuts)
417
+ default alphas and mcuts yield an IMF identical to kroupa01
418
+
419
+ Default kroupa01
420
+
421
+ size : int, optional
422
+ number of initial primary masses to sample
423
+ NOTE: this is set in cosmic-pop call as Nstep
424
+
425
+ alphas : array, optional
426
+ power law slopes for primary_model = 'custom'
427
+ Default [-1.3,-2.3,-2.3] (identical to slopes for primary_model = 'kroupa01')
428
+
429
+ mcuts : array, optional, units of Msun
430
+ break points separating the power law 'pieces' for primary_model = 'custom'
431
+ Default [0.08,0.5,1.0,150.] (identical to breaks for primary_model = 'kroupa01')
432
+
433
+ Optional kwargs are defined in `get_independent_sampler`
434
+
435
+ Returns
436
+ -------
437
+ a_0 : array
438
+ Sampled primary masses
439
+ np.sum(a_0) : float
440
+ Total amount of mass sampled
441
+ """
442
+
443
+ # Read in m1_min and m_max kwargs, if provided
444
+ m1_min = kwargs["m1_min"] if "m1_min" in kwargs.keys() else 0.08
445
+ m_max = kwargs["m_max"] if "m_max" in kwargs.keys() else 150.0
446
+
447
+ # Make sure m1_min value is below 0.5, since otherwise it will not work for Kroupa IMF
448
+ if m1_min > 0.5:
449
+ raise ValueError("m1_min must be greater than 0.5 Msun")
450
+
451
+ if primary_model == 'kroupa93':
452
+ alphas, mcuts = [-1.3,-2.2,-2.7], [m1_min,0.5,1.0,m_max]
453
+ # Since COSMIC/BSE can't handle < 0.08Msun, we by default truncate at 0.08 Msun instead of 0.01
454
+ elif primary_model == 'kroupa01':
455
+ alphas, mcuts = [-1.3,-2.3], [m1_min,0.5,m_max]
456
+ elif primary_model == 'salpeter55':
457
+ alphas, mcuts = [-2.35], [m1_min,m_max]
458
+ elif primary_model == 'custom':
459
+ if 'alphas' in kwargs and 'mcuts' in kwargs:
460
+ alphas = kwargs.pop("alphas", [-1.3,-2.3,-2.3])
461
+ mcuts = kwargs.pop("mcuts", [m1_min,0.5,1.0,m_max])
462
+ else:
463
+ raise ValueError("You must supply both alphas and mcuts to use"
464
+ " a custom IMF generator")
465
+
466
+ Ncumulative, Ntotal, coeff = [], 0., 1.
467
+ for i in range(len(alphas)):
468
+ g = 1. + alphas[i]
469
+ # Compute this piece of the IMF's contribution to Ntotal
470
+ if alphas[i] == -1: Ntotal += coeff * np.log(mcuts[i+1]/mcuts[i])
471
+ else: Ntotal += coeff/g * (mcuts[i+1]**g - mcuts[i]**g)
472
+ Ncumulative.append(Ntotal)
473
+ if i < len(alphas)-1: coeff *= mcuts[i+1]**(-alphas[i+1]+alphas[i])
474
+
475
+ cutoffs = np.array(Ncumulative)/Ntotal
476
+ u = np.random.uniform(0.,1.,size)
477
+ idxs = [() for i in range(len(alphas))]
478
+
479
+ for i in range(len(alphas)):
480
+ if i == 0: idxs[i], = np.where(u <= cutoffs[0])
481
+ elif i < len(alphas)-1: idxs[i], = np.where((u > cutoffs[i-1]) & (u <= cutoffs[i]))
482
+ else: idxs[i], = np.where(u > cutoffs[i-1])
483
+ for i in range(len(alphas)):
484
+ if alphas[i] == -1.0:
485
+ u[idxs[i]] = 10**np.random.uniform(np.log10(mcuts[i]),
486
+ np.log10(mcuts[i+1]),
487
+ len(idxs[i]))
488
+ else:
489
+ u[idxs[i]] = utils.rndm(a=mcuts[i], b=mcuts[i+1], g=alphas[i], size=len(idxs[i]))
490
+
491
+ return u, np.sum(u)
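+ # Illustrative sketch (assumption, not part of the module): drawing primaries from the
+ # 'custom' piecewise power law described in the docstring above. The alphas/mcuts shown
+ # reproduce the default kroupa01-like IMF between 0.08 and 150 Msun.
+ #
+ # masses, total_mass = Sample().sample_primary(
+ #     'custom', size=100000, alphas=[-1.3, -2.3, -2.3], mcuts=[0.08, 0.5, 1.0, 150.0])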
492
+
493
+ # sample secondary mass
494
+ def sample_secondary(self, primary_mass, q_power_law=0, **kwargs):
495
+ """Sample a secondary mass using draws from a uniform mass ratio distribution motivated by
496
+ `Mazeh et al. (1992) <http://adsabs.harvard.edu/abs/1992ApJ...401..265M>`_
497
+ and `Goldberg & Mazeh (1994) <http://adsabs.harvard.edu/abs/1994ApJ...429..362G>`_
498
+
499
+ NOTE: the lower limit is set by either qmin or m2_min, which are passed as kwargs
500
+
501
+ Parameters
502
+ ----------
503
+ primary_mass : `array`
504
+ sets the maximum secondary mass (for a maximum mass ratio of 1)
505
+
506
+ Optional kwargs are defined in `get_independent_sampler`
507
+
508
+ Returns
509
+ -------
510
+ secondary_mass : array
511
+ sampled secondary masses with array size matching size of
512
+ primary_mass
513
+ """
514
+ qmin = kwargs["qmin"] if "qmin" in kwargs.keys() else 0.0
515
+ m1_min = kwargs["m1_min"] if "m1_min" in kwargs.keys() else 0.08
516
+ m2_min = kwargs["m2_min"] if "m2_min" in kwargs.keys() else None
517
+ if (m2_min is None) and (qmin == 0.0):
518
+ warnings.warn("It is highly recommended that you specify either qmin or m2_min!")
519
+ if (m2_min is not None) and (m2_min > m1_min):
520
+ raise ValueError("The m2_min you specified is above the minimum"
521
+ " primary mass of the IMF, either lower m2_min or"
522
+ " raise the lower value of your sampled primaries")
523
+
524
+ if (m2_min is not None) & (qmin != 0):
525
+ raise ValueError("You cannot specify both m2_min and qmin, please choose one or the other")
526
+
527
+ # --- `msort` kwarg can be set to have different qmin above `msort`
528
+ msort = kwargs["msort"] if "msort" in kwargs.keys() else None
529
+ qmin_msort = kwargs["qmin_msort"] if "qmin_msort" in kwargs.keys() else None
530
+ m2_min_msort = kwargs["m2_min_msort"] if "m2_min_msort" in kwargs.keys() else None
531
+ if (msort is None) and (qmin_msort is not None):
532
+ raise ValueError("If qmin_msort is specified, you must also supply a value for msort")
533
+ if (msort is None) and (m2_min_msort is not None):
534
+ raise ValueError("If m2_min_msort is specified, you must also supply a value for msort")
535
+ if (m2_min_msort is not None) and (m2_min_msort > msort):
536
+ raise ValueError("The m2_min_msort you specified is above the minimum"
537
+ " primary mass of the high-mass binaries msort")
538
+
539
+ if (msort is not None) and (qmin_msort is not None):
540
+ (highmassIdx,) = np.where(primary_mass >= msort)
541
+ (lowmassIdx,) = np.where(primary_mass < msort)
542
+ else:
543
+ (highmassIdx,) = np.where(primary_mass < 0)
544
+ (lowmassIdx,) = np.where(primary_mass >= 0) # all idxs
545
+
546
+
547
+ qmin_vals = -1 * np.ones_like(primary_mass)
548
+ # --- qmin for low-mass systems (all systems if msort is not specified)
549
+ if (qmin > 0.0):
550
+ qmin_vals[lowmassIdx] = qmin * np.ones_like(primary_mass[lowmassIdx])
551
+ elif (qmin < 0.0):
552
+ # mass-dependent qmin, assume qmin=0.1 for m_primary<5
553
+ dat = np.array([[5.0, 0.1363522012578616],
554
+ [6.999999999999993, 0.1363522012578616],
555
+ [12.599999999999994, 0.11874213836477984],
556
+ [20.999999999999993, 0.09962264150943395],
557
+ [29.39999999999999, 0.0820125786163522],
558
+ [41, 0.06490566037735851],
559
+ [55, 0.052327044025157254],
560
+ [70.19999999999999, 0.04301886792452836],
561
+ [87.4, 0.03622641509433966],
562
+ [107.40000000000002, 0.030188679245283068],
563
+ [133.40000000000003, 0.02515723270440262],
564
+ [156.60000000000002, 0.02163522012578628],
565
+ [175.40000000000003, 0.01962264150943399],
566
+ [200.20000000000005, 0.017358490566037776]])
567
+ from scipy.interpolate import interp1d
568
+ qmin_interp = interp1d(dat[:, 0], dat[:, 1])
569
+ qmin_vals[lowmassIdx] = np.ones_like(primary_mass[lowmassIdx]) * 0.1
570
+ # index directly into qmin_vals (chained fancy indexing would write to a copy)
+ ind_5 = lowmassIdx[primary_mass[lowmassIdx] > 5.0]
571
+ qmin_vals[ind_5] = qmin_interp(primary_mass[ind_5])
572
+ else:
573
+ qmin_vals[lowmassIdx] = np.zeros_like(primary_mass[lowmassIdx])
574
+ # --- qmin for high-mass systems, if msort and qmin_msort are specified
575
+ if (msort is not None) and (qmin_msort is not None):
576
+ if (qmin_msort > 0.0):
577
+ qmin_vals[highmassIdx] = qmin_msort * np.ones_like(primary_mass[highmassIdx])
578
+ elif (qmin_msort < 0.0):
579
+ # mass-dependent qmin, assume qmin=0.1 for m_primary<5
580
+ dat = np.array([[5.0, 0.1363522012578616],
581
+ [6.999999999999993, 0.1363522012578616],
582
+ [12.599999999999994, 0.11874213836477984],
583
+ [20.999999999999993, 0.09962264150943395],
584
+ [29.39999999999999, 0.0820125786163522],
585
+ [41, 0.06490566037735851],
586
+ [55, 0.052327044025157254],
587
+ [70.19999999999999, 0.04301886792452836],
588
+ [87.4, 0.03622641509433966],
589
+ [107.40000000000002, 0.030188679245283068],
590
+ [133.40000000000003, 0.02515723270440262],
591
+ [156.60000000000002, 0.02163522012578628],
592
+ [175.40000000000003, 0.01962264150943399],
593
+ [200.20000000000005, 0.017358490566037776]])
594
+ from scipy.interpolate import interp1d
595
+ qmin_interp = interp1d(dat[:, 0], dat[:, 1])
596
+ qmin_vals[highmassIdx] = np.ones_like(primary_mass[highmassIdx]) * 0.1
597
+ # index directly into qmin_vals (chained fancy indexing would write to a copy)
+ ind_5 = highmassIdx[primary_mass[highmassIdx] > 5.0]
598
+ qmin_vals[ind_5] = qmin_interp(primary_mass[ind_5])
599
+ else:
600
+ qmin_vals[highmassIdx] = np.zeros_like(primary_mass[highmassIdx])
601
+
602
+ # --- apply m2_min and m2_min_msort, if specified
603
+ if m2_min is not None:
604
+ qmin_vals[lowmassIdx] = np.maximum(qmin_vals[lowmassIdx], m2_min/primary_mass[lowmassIdx])
605
+ if m2_min_msort is not None:
606
+ qmin_vals[highmassIdx] = np.maximum(qmin_vals[highmassIdx], m2_min_msort/primary_mass[highmassIdx])
607
+
608
+ # --- now, randomly sample mass ratios and get secondary masses
609
+ secondary_mass = utils.rndm(qmin_vals, 1, q_power_law, size=len(primary_mass)) * primary_mass
610
+ return secondary_mass
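+ # Illustrative sketch (assumption, not part of the module): pairing the mass samplers.
+ # Either qmin or m2_min (but not both) sets the lower bound of the mass-ratio draw.
+ #
+ # s = Sample()
+ # m1, _ = s.sample_primary('kroupa01', size=1000)
+ # m1_bin, m1_single, binfrac, bin_idx = s.binary_select(m1, binfrac_model=0.7)
+ # m2_bin = s.sample_secondary(m1_bin, q_power_law=0, qmin=0.1)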
611
+
612
+ def binary_select(self, primary_mass, binfrac_model=0.5, **kwargs):
613
+ """Select which primary masses will have a companion using
614
+ either a binary fraction specified by a float or a
615
+ primary-mass dependent binary fraction following
616
+ `van Haaften et al. (2013) <http://adsabs.harvard.edu/abs/2013A%26A...552A..69V>`_ in the appendix
617
+ or `Offner et al.(2023) <https://ui.adsabs.harvard.edu/abs/2023ASPC..534..275O/abstract>`_ in fig 1
618
+
619
+ Parameters
620
+ ----------
621
+ primary_mass : array
622
+ Mass that determines the binary fraction
623
+
624
+ binfrac_model : str or float
625
+ vanHaaften - primary mass dependent and ONLY VALID up to 100 Msun
626
+
627
+ offner23 - primary mass dependent
628
+
629
+ float - fraction of binaries; 0.5 means 2 in 3 stars are in a binary pair, while 1
630
+ means every star is in a binary pair
631
+
632
+ Optional kwargs are defined in `get_independent_sampler`
633
+
634
+ Returns
635
+ -------
636
+ stars_in_binary : array
637
+ primary masses that will have a binary companion
638
+
639
+ stars_in_single : array
640
+ primary masses that will be single stars
641
+
642
+ binary_fraction : array
643
+ system-specific probability of being in a binary
644
+
645
+ binaryIdx : array
646
+ Idx of stars in binary
647
+ """
648
+
649
+ # --- `msort` kwarg can be set to have different binary fraction above `msort`
650
+ msort = kwargs["msort"] if "msort" in kwargs.keys() else None
651
+ binfrac_model_msort = kwargs["binfrac_model_msort"] if "binfrac_model_msort" in kwargs.keys() else None
652
+ if (msort is None) and (binfrac_model_msort is not None):
653
+ raise ValueError("If binfrac_model_msort is specified, you must also supply a value for msort")
654
+ if (msort is not None) and (binfrac_model_msort is not None):
655
+ (highmassIdx,) = np.where(primary_mass >= msort)
656
+ (lowmassIdx,) = np.where(primary_mass < msort)
657
+ else:
658
+ (highmassIdx,) = np.where(primary_mass < 0)
659
+ (lowmassIdx,) = np.where(primary_mass >= 0) # all idxs
660
+
661
+
662
+ # --- read in binfrac models
663
+ if type(binfrac_model) == str:
664
+ if binfrac_model == "vanHaaften":
665
+ binary_fraction_low = 1 / 2.0 + 1 / \
666
+ 4.0 * np.log10(primary_mass[lowmassIdx])
667
+ binary_choose_low = np.random.uniform(
668
+ 0, 1.0, primary_mass[lowmassIdx].size)
669
+
670
+ (singleIdx_low,) = np.where(
671
+ binary_fraction_low < binary_choose_low)
672
+ (binaryIdx_low,) = np.where(
673
+ binary_fraction_low >= binary_choose_low)
674
+ elif binfrac_model == "offner23":
675
+ from scipy.interpolate import BSpline
676
+ t = [0.0331963853, 0.0331963853, 0.0331963853, 0.0331963853, 0.106066017,
677
+ 0.212132034, 0.424264069, 0.866025404, 1.03077641, 1.11803399,
678
+ 1.95959179, 3.87298335, 6.32455532, 11.6619038, 29.1547595,
679
+ 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 150, 150, 150, 150]
680
+ c = [0.08, 0.15812003, 0.20314101, 0.23842953, 0.33154153, 0.39131739,
681
+ 0.46020725, 0.59009569, 0.75306454, 0.81652502, 0.93518422, 0.92030594,
682
+ 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96]
683
+ k = 3
684
+ def offner_curve(x):
685
+ a = -0.16465041
686
+ b = -0.11616329
687
+ return np.piecewise(x, [x < 6.4, x >= 6.4], [BSpline(t,c,k), lambda x : a * np.exp(b * x) + 0.97])
688
+ binary_fraction_low = offner_curve(primary_mass[lowmassIdx])
689
+ binary_choose_low = np.random.uniform(
690
+ 0, 1.0, primary_mass[lowmassIdx].size)
691
+
692
+ (singleIdx_low,) = np.where(
693
+ binary_fraction_low < binary_choose_low)
694
+ (binaryIdx_low,) = np.where(
695
+ binary_fraction_low >= binary_choose_low)
696
+ else:
697
+ raise ValueError(
698
+ "You have supplied a non-supported binary fraction model. Please choose vanHaaften, offner23, or a float"
699
+ )
700
+ elif type(binfrac_model) == float:
701
+ if (binfrac_model <= 1.0) & (binfrac_model >= 0.0):
702
+ binary_fraction_low = binfrac_model * \
703
+ np.ones(primary_mass[lowmassIdx].size)
704
+ binary_choose_low = np.random.uniform(
705
+ 0, 1.0, primary_mass[lowmassIdx].size)
706
+
707
+ (singleIdx_low,) = np.where(
708
+ binary_choose_low > binary_fraction_low)
709
+ (binaryIdx_low,) = np.where(
710
+ binary_choose_low <= binary_fraction_low)
711
+ else:
712
+ raise ValueError(
713
+ "You have supplied a fraction outside of 0-1. Please choose a fraction between 0 and 1."
714
+ )
715
+ else:
716
+ raise ValueError(
717
+ "You have not supplied a model or a fraction. Please choose either vanHaaften, offner23, or a float"
718
+ )
719
+
720
+ # --- if using a different binary fraction for high-mass systems
721
+ if (binfrac_model_msort is not None) and (type(binfrac_model_msort) == str):
722
+ if binfrac_model_msort == "vanHaaften":
723
+ binary_fraction_high = 1 / 2.0 + 1 / \
724
+ 4.0 * np.log10(primary_mass[highmassIdx])
725
+ binary_choose_high = np.random.uniform(
726
+ 0, 1.0, primary_mass[highmassIdx].size)
727
+
728
+ (singleIdx_high,) = np.where(
729
+ binary_fraction_high < binary_choose_high)
730
+ (binaryIdx_high,) = np.where(
731
+ binary_fraction_high >= binary_choose_high)
732
+ elif binfrac_model_msort == "offner23":
733
+ from scipy.interpolate import BSpline
734
+ t = [0.0331963853, 0.0331963853, 0.0331963853, 0.0331963853, 0.106066017,
735
+ 0.212132034, 0.424264069, 0.866025404, 1.03077641, 1.11803399,
736
+ 1.95959179, 3.87298335, 6.32455532, 11.6619038, 29.1547595,
737
+ 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 150, 150, 150, 150]
738
+ c = [0.08, 0.15812003, 0.20314101, 0.23842953, 0.33154153, 0.39131739,
739
+ 0.46020725, 0.59009569, 0.75306454, 0.81652502, 0.93518422, 0.92030594,
740
+ 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96, 0.96]
741
+ k = 3
742
+ def offner_curve(x):
743
+ a = -0.16465041
744
+ b = -0.11616329
745
+ return np.piecewise(x, [x < 6.4, x >= 6.4], [BSpline(t,c,k), lambda x : a * np.exp(b * x) + 0.97])
746
+ binary_fraction_high = offner_curve(primary_mass[highmassIdx])
747
+ binary_choose_high = np.random.uniform(
748
+ 0, 1.0, primary_mass[highmassIdx].size)
749
+
750
+ (singleIdx_high,) = np.where(
751
+ binary_fraction_high < binary_choose_high)
752
+ (binaryIdx_high,) = np.where(
753
+ binary_fraction_high >= binary_choose_high)
754
+ else:
755
+ raise ValueError(
756
+ "You have supplied a non-supported binary fraction model. Please choose vanHaaften, offner23, or a float"
757
+ )
758
+ elif (binfrac_model_msort is not None) and (type(binfrac_model_msort) == float):
759
+ if (binfrac_model_msort <= 1.0) & (binfrac_model_msort >= 0.0):
760
+ binary_fraction_high = binfrac_model_msort * \
761
+ np.ones(primary_mass[highmassIdx].size)
762
+ binary_choose_high = np.random.uniform(
763
+ 0, 1.0, primary_mass[highmassIdx].size)
764
+
765
+ (singleIdx_high,) = np.where(
766
+ binary_choose_high > binary_fraction_high)
767
+ (binaryIdx_high,) = np.where(
768
+ binary_choose_high <= binary_fraction_high)
769
+ else:
770
+ raise ValueError(
771
+ "You have supplied a fraction outside of 0-1. Please choose a fraction between 0 and 1."
772
+ )
773
+ elif (binfrac_model_msort is not None):
774
+ raise ValueError(
775
+ "You have not supplied a model or a fraction. Please choose either vanHaaften, offner23, or a float"
776
+ )
777
+
778
+
779
+ # --- get pertinent info
780
+ if (binfrac_model_msort is not None):
781
+ stars_in_binary = np.append(
782
+ primary_mass[highmassIdx][binaryIdx_high], primary_mass[lowmassIdx][binaryIdx_low])
783
+ stars_in_single = np.append(
784
+ primary_mass[highmassIdx][singleIdx_high], primary_mass[lowmassIdx][singleIdx_low])
785
+ binary_fraction = np.append(
786
+ binary_fraction_high[binaryIdx_high], binary_fraction_low[binaryIdx_low])
787
+ binaryIdx = np.append(
788
+ highmassIdx[binaryIdx_high], lowmassIdx[binaryIdx_low])
789
+ else:
790
+ stars_in_binary = primary_mass[lowmassIdx][binaryIdx_low]
791
+ stars_in_single = primary_mass[lowmassIdx][singleIdx_low]
792
+ binary_fraction = binary_fraction_low[binaryIdx_low]
793
+ binaryIdx = lowmassIdx[binaryIdx_low]
794
+
795
+ return (
796
+ stars_in_binary,
797
+ stars_in_single,
798
+ binary_fraction,
799
+ binaryIdx,
800
+ )
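+ # Worked example of the vanHaaften branch above (for illustration only): the binary
+ # probability is f_bin(m1) = 0.5 + 0.25*log10(m1), so a 1 Msun primary has f_bin = 0.5
+ # while a 10 Msun primary has f_bin = 0.75; each primary is kept as a binary when a
+ # uniform draw falls at or below f_bin:
+ #
+ # m1 = np.array([1.0, 10.0, 100.0])
+ # f_bin = 0.5 + 0.25 * np.log10(m1)   # -> [0.5, 0.75, 1.0]
+ # in_binary = np.random.uniform(0, 1, m1.size) <= f_bin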
801
+
802
+ def sample_porb(self, mass1, mass2, rad1, rad2, porb_model, porb_max=None, size=None, **kwargs):
803
+ """Sample the orbital period according to the user-specified model
804
+
805
+ Parameters
806
+ ----------
807
+ mass1 : array
808
+ primary masses
809
+ mass2 : array
810
+ secondary masses
811
+ rad1 : array
812
+ radii of the primaries.
813
+ rad2 : array
814
+ radii of the secondaries
815
+ porb_model : `str` or `dict`
816
+ selects which model to sample orbital periods, choices include:
817
+ log_uniform : semi-major axis flat in log space from RRLO < 0.5 up to 1e5 Rsun according to
818
+ `Abt (1983) <http://adsabs.harvard.edu/abs/1983ARA%26A..21..343A>`_
819
+ and consistent with Dominik+2012,2013
820
+ and then converted to orbital period in days using Kepler III
821
+ sana12 : power law orbital period between 0.15 < log(P/day) < 5.5 following
822
+ `Sana+2012 <https://ui.adsabs.harvard.edu/abs/2012Sci...337..444S/abstract>_`
823
+ renzo19 : power law orbital period for m1 > 15Msun binaries from
824
+ `Sana+2012 <https://ui.adsabs.harvard.edu/abs/2012Sci...337..444S/abstract>_`
825
+ following the implementation of
826
+ `Renzo+2019 <https://ui.adsabs.harvard.edu/abs/2019A%26A...624A..66R/abstract>_`
827
+ and flat in log otherwise
828
+ raghavan10 : log normal orbital periods in days with mean_logP = 4.9
829
+ and sigma_logP = 2.3 between 0 < log10(P/day) < 9 following
830
+ `Raghavan+2010 <https://ui.adsabs.harvard.edu/abs/2010ApJS..190....1R/abstract>_`
831
+ moe19 : log normal orbital periods in days with mean_logP = 4.9
832
+ and sigma_logP = 2.3 between 0 < log10(P/day) < 9 following
833
+ `Raghavan+2010 <https://ui.adsabs.harvard.edu/abs/2010ApJS..190....1R/abstract>_`
834
+ but with different close binary fractions following
835
+ `Moe+2019 <https://ui.adsabs.harvard.edu/abs/2019ApJ...875...61M/abstract>_`
836
+ martinez26 : piecewise model with a power law orbital period following
837
+ `Sana+2012 <https://ui.adsabs.harvard.edu/abs/2012Sci...337..444S/abstract>_`
838
+ between 0.15 < log(P/day) < log(3000) for binaries with m1 >= 8Msun and following
839
+ `Raghavan+2010 <https://ui.adsabs.harvard.edu/abs/2010ApJS..190....1R/abstract>_`
840
+ with a log normal orbital period in days with mean_logP = 4.9 and sigma_logP = 2.3 between
841
+ 0 < log10(P/day) < 9 for binaries with m1 < 8Msun. Used in
842
+ `Martinez+2026 <https://ui.adsabs.harvard.edu/abs/2025arXiv251123285M/abstract>_`.
843
+ Custom power law distribution defined with a dictionary with keys "min", "max", and "slope"
844
+ (e.g. porb_model={"min": 0.15, "max": 0.55, "slope": -0.55}) would reproduce the
845
+ Sana+2012 distribution.
846
+ met : float
847
+ metallicity of the population
848
+
849
+ Returns
850
+ -------
851
+ porb : array
852
+ orbital period with array size equalling array size
853
+ of mass1 and mass2 in units of days
854
+ aRL_over_a: array
855
+ ratio of the separation where RL overflow starts to the sampled separation,
856
+ used to truncate the eccentricity distribution
857
+ """
858
+
859
+ # First we need to compute where RL overflow starts. We truncate the lower-bound
860
+ # of the period distribution there
861
+ q = mass2 / mass1
862
+ RL_fac = (0.49 * q ** (2.0 / 3.0)) / (
863
+ 0.6 * q ** (2.0 / 3.0) + np.log(1 + q ** (1.0 / 3.0))
864
+ )
865
+
866
+ q2 = mass1 / mass2
867
+ RL_fac2 = (0.49 * q2 ** (2.0 / 3.0)) / (
868
+ 0.6 * q2 ** (2.0 / 3.0) + np.log(1 + q2 ** (1.0 / 3.0))
869
+ )
870
+
871
+ # include the factor for the eccentricity
872
+ RL_max = 2 * rad1 / RL_fac
873
+ (ind_switch,) = np.where(RL_max < 2 * rad2 / RL_fac2)
874
+ if len(ind_switch) >= 1:
875
+ RL_max[ind_switch] = 2 * rad2[ind_switch] / RL_fac2[ind_switch]
876
+
877
+ # Can either sample the porb first and truncate the eccentricities at RL overflow
878
+ # or sample the eccentricities first and truncate a(1-e) at RL overflow
879
+ #
880
+ # If we haven't sampled the eccentricities, then the minimum semi-major axis is at
881
+ # RL overflow
882
+ #
883
+ # If we have, then the minimum pericenter is set to RL overflow
884
+ a_min = RL_max
885
+
886
+ if porb_model == "log_uniform":
887
+ if porb_max is None:
888
+ a_0 = np.random.uniform(np.log(a_min), np.log(1e5), size)
889
+ else:
890
+ # If in CMC, only sample binaries as wide as the local hard/soft boundary
891
+ a_max = utils.a_from_p(porb_max,mass1,mass2)
892
+ a_max[a_max < a_min] = a_min[a_max < a_min]
893
+ a_0 = np.random.uniform(np.log(a_min), np.log(a_max), size)
894
+
895
+ # convert out of log space
896
+ a_0 = np.exp(a_0)
897
+ aRL_over_a = a_min/a_0
898
+
899
+ # convert to au
900
+ rsun_au = 0.00465047
901
+ a_0 = a_0 * rsun_au
902
+
903
+ # convert to orbital period in years
904
+ yr_day = 365.24
905
+ porb_yr = ((a_0 ** 3.0) / (mass1 + mass2)) ** 0.5
906
+ porb = porb_yr * yr_day
907
+ elif porb_model == "sana12":
908
+ # Same here: if using CMC, set the maximum porb to the smaller of either the
909
+ # hard/soft boundary or 5.5 (from Sana paper)
910
+ if porb_max is None:
911
+ log10_porb_max = 5.5
912
+ else:
913
+ log10_porb_max = np.minimum(5.5,np.log10(porb_max))
914
+
915
+ # Use the lower limit from the Sana12 distribution, unless this means the binaries are sampled at RL overflow. If so,
916
+ # change the lower limit to a_min
917
+
918
+ log10_porb_min = np.array([0.15]*len(a_min))
919
+ RL_porb = utils.p_from_a(a_min,mass1,mass2)
920
+ log10_RL_porb = np.log10(RL_porb)
921
+ log10_porb_min[log10_porb_min < log10_RL_porb] = log10_RL_porb[log10_porb_min < log10_RL_porb]
922
+
923
+ porb = 10 ** utils.rndm(a=log10_porb_min, b=log10_porb_max, g=-0.55, size=size)
924
+ aRL_over_a = a_min / utils.a_from_p(porb,mass1,mass2)
925
+
926
+ elif isinstance(porb_model, dict):
927
+ # use a power law distribution for the orbital periods
928
+ params = {
929
+ "min": 0.15,
930
+ "max": 5.5,
931
+ "slope": -0.55,
932
+ }
933
+ # update the default parameters with the user-supplied ones
934
+ params.update(porb_model)
935
+
936
+ # same calculations as sana12 case (sample from a power law distribution but avoid RLOF)
937
+ log10_RL_porb = np.log10(utils.p_from_a(a_min, mass1, mass2))
938
+ params["min"] = np.full(len(a_min), params["min"])
939
+ params["min"][params["min"] < log10_RL_porb] = log10_RL_porb[params["min"] < log10_RL_porb]
940
+ porb = 10**utils.rndm(a=params["min"], b=params["max"], g=params["slope"], size=size)
941
+ aRL_over_a = a_min / utils.a_from_p(porb, mass1, mass2)
942
+
943
+ elif porb_model == "renzo19":
944
+ # Same here: if using CMC, set the maximum porb to the smaller of either the
945
+ # hard/soft boundary or 5.5 (from Sana paper)
946
+ if porb_max is None:
947
+ log10_porb_max = 5.5
948
+
949
+ else:
950
+ log10_porb_max = np.minimum(5.5,np.log10(porb_max))
951
+
952
+ # Use the lower limit from the Sana12 distribution, unless this means the binaries are sampled at RL overflow. If so,
953
+ # change the lower limit to a_min
954
+ log10_porb_min = np.array([0.15]*len(a_min))
955
+ RL_porb = utils.p_from_a(a_min,mass1,mass2)
956
+ log10_RL_porb = np.log10(RL_porb)
957
+ log10_porb_min[log10_porb_min < log10_RL_porb] = log10_RL_porb[log10_porb_min < log10_RL_porb]
958
+
959
+ porb = 10 ** (np.random.uniform(log10_porb_min, log10_porb_max, size))
960
+ (ind_massive,) = np.where(mass1 > 15)
961
+
962
+ if type(log10_porb_max) != float:
963
+ log10_porb_max = log10_porb_max[ind_massive]
964
+ # log10_porb_min is indexed with ind_massive below, so it is not sliced here
965
+
966
+
967
+ porb[ind_massive] = 10 ** utils.rndm(
968
+ a=log10_porb_min[ind_massive], b=log10_porb_max, g=-0.55, size=len(ind_massive))
969
+ aRL_over_a = a_min / utils.a_from_p(porb,mass1,mass2)
970
+
971
+ elif porb_model == "raghavan10":
972
+ import scipy
973
+ # Same here: if using CMC, set the maximum porb to the smaller of either the
975
+ # hard/soft boundary or 9.0 (the upper limit of the Raghavan+2010 distribution)
976
+ if porb_max is None:
977
+ log10_porb_max = 9.0
978
+ else:
979
+ log10_porb_max = np.minimum(9.0, np.log10(porb_max))
979
+
980
+ lower = 0
981
+ upper = log10_porb_max
982
+ mu = 4.9
983
+ sigma = 2.3
984
+
985
+ porb = 10 ** (scipy.stats.truncnorm.rvs(
986
+ (lower-mu)/sigma,(upper-mu)/sigma, loc=mu, scale=sigma, size=size
987
+ ))
988
+
989
+ aRL_over_a = a_min / utils.a_from_p(porb,mass1,mass2)
990
+
991
+ elif porb_model == "moe19":
992
+ from scipy.interpolate import interp1d
993
+ from scipy.stats import norm
994
+ from scipy.integrate import trapezoid
995
+
996
+ try:
997
+ met = kwargs.pop('met')
998
+ except KeyError:
999
+ raise ValueError(
1000
+ "You have chosen moe19 for the orbital period distribution which is a metallicity-dependent distribution. "
1001
+ "Please specify a metallicity for the population."
1002
+ )
1003
+ def get_logP_dist(nsamp, norm_wide, norm_close, mu=4.4, sigma=2.1):
1004
+ logP_lo_lim=0
1005
+ logP_hi_lim=9
1006
+ close_logP=4.0
1007
+ wide_logP=6.0
1008
+ neval = 500
1009
+ prob_wide = norm.pdf(np.linspace(wide_logP, logP_hi_lim, neval), loc=mu, scale=sigma)*norm_wide
1010
+ prob_close = norm.pdf(np.linspace(logP_lo_lim, close_logP, neval), loc=mu, scale=sigma)*norm_close
1011
+ slope = -(prob_close[-1] - prob_wide[0]) / (wide_logP - close_logP)
1012
+ prob_intermediate = slope * (np.linspace(close_logP, wide_logP, neval) - close_logP) + prob_close[-1]
1013
+ prob_interp_int = interp1d(np.linspace(close_logP, wide_logP, neval), prob_intermediate)
1014
+
1015
+ log_p_success = []
1016
+ n_success = 0
1017
+ while n_success < nsamp:
1018
+ logP_samp = np.random.uniform(logP_lo_lim, logP_hi_lim, nsamp*5)
1019
+ logP_prob = np.random.uniform(0, 1, nsamp*5)
1020
+
1021
+ logP_samp_lo = logP_samp[logP_samp<close_logP]
1022
+ logP_prob_lo = logP_prob[logP_samp<close_logP]
1023
+ log_p_success.extend(logP_samp_lo[np.where(logP_prob_lo < norm.pdf(logP_samp_lo, loc=mu, scale=sigma)*norm_close)])
1024
+
1025
+ logP_samp_int = logP_samp[(logP_samp>=close_logP) & (logP_samp<wide_logP)]
1026
+ logP_prob_int = logP_prob[(logP_samp>=close_logP) & (logP_samp<wide_logP)]
1027
+ log_p_success.extend(logP_samp_int[np.where(logP_prob_int < prob_interp_int(logP_samp_int))])
1028
+
1029
+ logP_samp_hi = logP_samp[(logP_samp>=wide_logP)]
1030
+ logP_prob_hi = logP_prob[(logP_samp>=wide_logP)]
1031
+
1032
+ log_p_success.extend(logP_samp_hi[np.where(logP_prob_hi < norm.pdf(logP_samp_hi, loc=mu, scale=sigma)*norm_wide)])
1033
+
1034
+ n_success = len(log_p_success)
1035
+ log_p_success = np.array(log_p_success)[np.random.randint(0,n_success,nsamp)]
1036
+ return log_p_success
1037
+ norm_wide, norm_close = utils.get_porb_norm(met)
1038
+ logP_dist = get_logP_dist(size, norm_wide, norm_close)
1039
+ logP_dist = logP_dist[np.random.randint(0, len(logP_dist), size)]
1040
+ porb = 10**logP_dist
1041
+ aRL_over_a = a_min / utils.a_from_p(porb,mass1,mass2)
1042
+
1043
+ elif porb_model == "martinez26":
1044
+ # martinez+26 model: use sana12 for mass1 >= 8.0 and raghavan10 for mass1 < 8.0
1045
+ import scipy
1046
+
1047
+ # Create mask for high-mass and low-mass systems
1048
+ (ind_massive,) = np.where(mass1 >= 8.0)
1049
+ (ind_lowmass,) = np.where(mass1 < 8.0)
1050
+
1051
+ # Initialize porb array
1052
+ porb = np.zeros(size)
1053
+
1054
+ # sana12 for massive systems with upper bound 3000 days
1055
+ if len(ind_massive) > 0:
1056
+ if porb_max is None:
1057
+ log10_porb_max_sana = np.log10(3000)
1058
+ else:
1059
+ log10_porb_max_sana = np.minimum(np.log10(3000), np.log10(porb_max))
1060
+
1061
+ log10_porb_min_sana = np.array([0.15]*len(ind_massive))
1062
+ RL_porb_sana = utils.p_from_a(a_min[ind_massive], mass1[ind_massive], mass2[ind_massive])
1063
+ log10_RL_porb_sana = np.log10(RL_porb_sana)
1064
+ log10_porb_min_sana[log10_porb_min_sana < log10_RL_porb_sana] = log10_RL_porb_sana[log10_porb_min_sana < log10_RL_porb_sana]
1065
+
1066
+ porb[ind_massive] = 10 ** utils.rndm(a=log10_porb_min_sana, b=log10_porb_max_sana, g=-0.55, size=len(ind_massive))
1067
+
1068
+ # Raghavan10 for low-mass systems (mass1 < 8.0)
1069
+ if len(ind_lowmass) > 0:
1070
+ if porb_max is None:
1071
+ log10_porb_max_ragh = 9.0
1072
+ else:
1073
+ log10_porb_max_ragh = np.minimum(9.0, np.log10(porb_max))
1074
+
1075
+ # Handle array vs scalar case for log10_porb_max_ragh
1076
+ if isinstance(log10_porb_max_ragh, np.ndarray):
1077
+ log10_porb_max_ragh = log10_porb_max_ragh[ind_lowmass]
1078
+
1079
+ lower = 0
1080
+ upper = log10_porb_max_ragh
1081
+ mu = 4.9
1082
+ sigma = 2.3
1083
+
1084
+ # Sample from truncated normal distribution
1085
+ porb[ind_lowmass] = 10 ** (scipy.stats.truncnorm.rvs(
1086
+ (lower-mu)/sigma, (upper-mu)/sigma, loc=mu, scale=sigma, size=len(ind_lowmass)
1087
+ ))
1088
+
1089
+ aRL_over_a = a_min / utils.a_from_p(porb, mass1, mass2)
1090
+ else:
1091
+ raise ValueError(
1092
+ "You have supplied a non-supported model; Please choose either log_uniform, sana12, renzo19, raghavan10, moe19, or martinez26"
1093
+ )
1094
+ return porb, aRL_over_a
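+ # Illustrative sketch (assumption, not part of the module): the dict form of porb_model
+ # samples log10(P/day) from a single power law, so the sana12 behaviour can be recovered with
+ #
+ # porb, aRL_over_a = Sample().sample_porb(
+ #     mass1, mass2, rad1, rad2,
+ #     porb_model={"min": 0.15, "max": 5.5, "slope": -0.55},
+ #     size=mass1.size)
+ #
+ # where mass1, mass2, rad1 and rad2 are arrays of equal length (e.g. from sample_primary,
+ # sample_secondary and set_reff).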
1095
+
1096
+ def sample_ecc(self, aRL_over_a, ecc_model="sana12", size=None):
1097
+ """Sample the eccentricity according to a user specified model
1098
+
1099
+ Parameters
1100
+ ----------
1101
+ ecc_model : string
1102
+ 'thermal' samples from a thermal eccentricity distribution following
1103
+ `Heggie (1975) <http://adsabs.harvard.edu/abs/1975MNRAS.173..729H>`_
1104
+ 'uniform' samples from a uniform eccentricity distribution
1105
+ 'sana12' samples from the eccentricity distribution from
1106
+ `Sana+2012 <https://ui.adsabs.harvard.edu/abs/2012Sci...337..444S/abstract>_`
1107
+ 'circular' assumes zero eccentricity for all systems
1108
+ DEFAULT = 'sana12'
1109
+
1110
+ aRL_over_a : ratio of the minimum separation (where RL overflow starts)
1111
+ to the sampled semi-major axis. Use this to truncate the eccentricity distribution
1112
+
1113
+ size : int, optional
1114
+ number of eccentricities to sample
1115
+ this is set in cosmic-pop call as Nstep
1116
+
1117
+ Returns
1118
+ -------
1119
+ ecc : array
1120
+ array of sampled eccentricities with size=size
1121
+ """
1122
+
1123
+ # if we sampled the periods first, we need to truncate the eccentricities
1124
+ # to avoid RL overflow/collision at pericenter
1125
+ e_max = 1.0 - aRL_over_a
1126
+
1127
+ if ecc_model == "thermal":
1128
+ a_0 = np.random.uniform(0.0, e_max**2, size)
1129
+ ecc = a_0 ** 0.5
1130
+ return ecc
1131
+
1132
+ elif ecc_model == "uniform":
1133
+ ecc = np.random.uniform(0.0, e_max, size)
1134
+ return ecc
1135
+
1136
+ elif ecc_model == "sana12":
1137
+ sana_max = np.array([0.9]*len(e_max))
1138
+ max_e = np.minimum(e_max, sana_max)
1139
+ ecc = utils.rndm(a=0.001, b=max_e, g=-0.45, size=size)
1140
+
1141
+ return ecc
1142
+
1143
+ elif ecc_model == "circular":
1144
+ ecc = np.zeros(size)
1145
+ return ecc
1146
+
1147
+ else:
1148
+ raise ValueError("You have specified an unsupported model. Please choose from thermal, "
1149
+ "uniform, sana12, or circular")
1150
+
1151
+ def sample_SFH(self, SF_start=13700.0, SF_duration=0.0, met=0.02, size=None):
1152
+ """Sample an evolution time for each binary based on a user-specified
1153
+ time at the start of star formation and the duration of star formation.
1154
+ The default is a burst of star formation 13,700 Myr in the past.
1155
+
1156
+ Parameters
1157
+ ----------
1158
+ SF_start : float
1159
+ Time in the past when star formation initiates in Myr
1160
+ SF_duration : float
1161
+ Duration of constant star formation beginning from SF_Start in Myr
1162
+ met : float
1163
+ metallicity of the population [Z_sun = 0.02]
1164
+ Default: 0.02
1165
+ size : int, optional
1166
+ number of evolution times to sample
1167
+ NOTE: this is set in cosmic-pop call as Nstep
1168
+
1169
+ Returns
1170
+ -------
1171
+ tphys : array
1172
+ array of evolution times of size=size
1173
+ metallicity : array
1174
+ array of metallicities
1175
+ """
1176
+
1177
+ if (SF_start > 0.0) & (SF_duration >= 0.0):
1178
+ tphys = np.random.uniform(SF_start - SF_duration, SF_start, size)
1179
+ metallicity = np.ones(size) * met
1180
+ return tphys, metallicity
1181
+ else:
1182
+ raise ValueError(
1183
+ 'SF_start must be greater than 0.0 and SF_duration must be non-negative')
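+ # Illustrative sketch (assumption, not part of the module): a 10 Gyr burst population vs.
+ # 10 Gyr of constant star formation, following the SF_start/SF_duration convention above.
+ #
+ # t_burst, met_burst = Sample().sample_SFH(SF_start=10000.0, SF_duration=0.0, met=0.02, size=500)
+ # t_const, met_const = Sample().sample_SFH(SF_start=10000.0, SF_duration=10000.0, met=0.02, size=500)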
1184
+
1185
+ def set_kstar(self, mass):
1186
+ """Initialize stellar types according to BSE classification
1187
+ kstar=1 if M>=0.7 Msun; kstar=0 if M<0.7 Msun
1188
+
1189
+ Parameters
1190
+ ----------
1191
+ mass : array
1192
+ array of masses
1193
+
1194
+ Returns
1195
+ -------
1196
+ kstar : array
1197
+ array of initial stellar types
1198
+ """
1199
+
1200
+ kstar = np.zeros(mass.size)
1201
+ low_cutoff = 0.7
1202
+ lowIdx = np.where(mass < low_cutoff)[0]
1203
+ hiIdx = np.where(mass >= low_cutoff)[0]
1204
+
1205
+ kstar[lowIdx] = 0
1206
+ kstar[hiIdx] = 1
1207
+
1208
+ return kstar
1209
+
1210
+ def set_reff(self, mass, metallicity, zsun=0.02):
1211
+ """
1212
+ Better way to set the radii from BSE, by calling it directly
1213
+
1214
+ takes masses and metallicities, and returns the radii
1215
+
1216
+ Note that the BSE function is hard-coded to go through arrays
1217
+ of length 10^5; if more masses than that are passed, this
1218
+ function processes them in chunks of that size
1219
+ """
1220
+ from cosmic import _evolvebin
1221
+
1222
+ max_array_size = 100000
1223
+ total_length = len(mass)
1224
+ radii = np.zeros(total_length)
1225
+
1226
+ _evolvebin.metvars.zsun = zsun
1227
+
1228
+ idx = 0
1229
+ while total_length > max_array_size:
1230
+ ## cycle through the masses max_array_size number at a time
1231
+ temp_mass = mass[idx*max_array_size:(idx+1)*max_array_size]
1232
+
1233
+ temp_radii = _evolvebin.compute_r(temp_mass,metallicity,max_array_size)
1234
+
1235
+ ## put these in the radii array
1236
+ radii[idx*max_array_size:(idx+1)*max_array_size] = temp_radii
1237
+
1238
+ total_length -= max_array_size
1239
+ idx += 1
1240
+
1241
+ length_remaining = total_length
1242
+
1243
+ # if smaller than 10^5, need to pad out the array
1244
+ temp_mass = np.zeros(max_array_size)
1245
+ temp_mass[:length_remaining] = mass[-length_remaining:]
1246
+
1247
+ temp_radii = _evolvebin.compute_r(temp_mass,metallicity,length_remaining)
1248
+
1249
+ #finish up the array
1250
+ radii[-length_remaining:] = temp_radii[:length_remaining]
1251
+
1252
+ return radii