cosmic-popsynth 3.6.2__cp313-cp313-macosx_14_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cosmic/.dylibs/libgcc_s.1.1.dylib +0 -0
- cosmic/.dylibs/libgfortran.5.dylib +0 -0
- cosmic/.dylibs/libquadmath.0.dylib +0 -0
- cosmic/Match.py +191 -0
- cosmic/__init__.py +32 -0
- cosmic/_commit_hash.py +1 -0
- cosmic/_evolvebin.cpython-313-darwin.so +0 -0
- cosmic/_version.py +1 -0
- cosmic/bse_utils/__init__.py +18 -0
- cosmic/bse_utils/zcnsts.py +570 -0
- cosmic/bse_utils/zdata.py +596 -0
- cosmic/checkstate.py +128 -0
- cosmic/data/cosmic-settings.json +1635 -0
- cosmic/evolve.py +607 -0
- cosmic/filter.py +214 -0
- cosmic/get_commit_hash.py +15 -0
- cosmic/output.py +466 -0
- cosmic/plotting.py +680 -0
- cosmic/sample/__init__.py +26 -0
- cosmic/sample/cmc/__init__.py +18 -0
- cosmic/sample/cmc/elson.py +411 -0
- cosmic/sample/cmc/king.py +260 -0
- cosmic/sample/initialbinarytable.py +251 -0
- cosmic/sample/initialcmctable.py +449 -0
- cosmic/sample/sampler/__init__.py +25 -0
- cosmic/sample/sampler/cmc.py +418 -0
- cosmic/sample/sampler/independent.py +1252 -0
- cosmic/sample/sampler/multidim.py +882 -0
- cosmic/sample/sampler/sampler.py +130 -0
- cosmic/test_evolve.py +108 -0
- cosmic/test_match.py +30 -0
- cosmic/test_sample.py +580 -0
- cosmic/test_utils.py +198 -0
- cosmic/utils.py +1574 -0
- cosmic_popsynth-3.6.2.data/scripts/cosmic-pop +544 -0
- cosmic_popsynth-3.6.2.dist-info/METADATA +55 -0
- cosmic_popsynth-3.6.2.dist-info/RECORD +38 -0
- cosmic_popsynth-3.6.2.dist-info/WHEEL +6 -0
cosmic/utils.py
ADDED
|
@@ -0,0 +1,1574 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
# Copyright (C) Scott Coughlin (2017 - 2021)
|
|
3
|
+
#
|
|
4
|
+
# This file is part of cosmic.
|
|
5
|
+
#
|
|
6
|
+
# cosmic is free software: you can redistribute it and/or modify
|
|
7
|
+
# it under the terms of the GNU General Public License as published by
|
|
8
|
+
# the Free Software Foundation, either version 3 of the License, or
|
|
9
|
+
# (at your option) any later version.
|
|
10
|
+
#
|
|
11
|
+
# cosmic is distributed in the hope that it will be useful,
|
|
12
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
13
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
14
|
+
# GNU General Public License for more details.
|
|
15
|
+
#
|
|
16
|
+
# You should have received a copy of the GNU General Public License
|
|
17
|
+
# along with cosmic. If not, see <http://www.gnu.org/licenses/>.
|
|
18
|
+
|
|
19
|
+
"""`utils`
|
|
20
|
+
"""
|
|
21
|
+
import scipy
|
|
22
|
+
import numpy as np
|
|
23
|
+
import pandas as pd
|
|
24
|
+
import scipy.special as ss
|
|
25
|
+
import astropy.stats as astrostats
|
|
26
|
+
import warnings
|
|
27
|
+
import ast
|
|
28
|
+
import operator
|
|
29
|
+
import json
|
|
30
|
+
import itertools
|
|
31
|
+
import os.path
|
|
32
|
+
import h5py as h5
|
|
33
|
+
import re
|
|
34
|
+
|
|
35
|
+
import sys
|
|
36
|
+
if sys.version_info >= (3, 9):
|
|
37
|
+
from importlib.resources import files as io_files
|
|
38
|
+
else:
|
|
39
|
+
from importlib_resources import files as io_files
|
|
40
|
+
|
|
41
|
+
from configparser import ConfigParser
|
|
42
|
+
from .bse_utils.zcnsts import zcnsts
|
|
43
|
+
|
|
44
|
+
__author__ = "Katelyn Breivik <katie.breivik@gmail.com>"
|
|
45
|
+
__credits__ = [
|
|
46
|
+
"Scott Coughlin <scott.coughlin@ligo.org>",
|
|
47
|
+
"Michael Zevin <zevin@northwestern.edu>",
|
|
48
|
+
"Tom Wagg <tomjwagg@gmail.com>",
|
|
49
|
+
]
|
|
50
|
+
__all__ = [
|
|
51
|
+
"filter_bin_state",
|
|
52
|
+
"conv_select",
|
|
53
|
+
"mass_min_max_select",
|
|
54
|
+
"idl_tabulate",
|
|
55
|
+
"rndm",
|
|
56
|
+
"param_transform",
|
|
57
|
+
"dat_transform",
|
|
58
|
+
"dat_un_transform",
|
|
59
|
+
"knuth_bw_selector",
|
|
60
|
+
"error_check",
|
|
61
|
+
"check_initial_conditions",
|
|
62
|
+
"convert_kstar_evol_type",
|
|
63
|
+
"parse_inifile",
|
|
64
|
+
"pop_write",
|
|
65
|
+
"a_from_p",
|
|
66
|
+
"p_from_a",
|
|
67
|
+
"get_Z_from_FeH",
|
|
68
|
+
"get_FeH_from_Z",
|
|
69
|
+
"get_binfrac_of_Z",
|
|
70
|
+
"get_porb_norm",
|
|
71
|
+
"get_met_dep_binfrac",
|
|
72
|
+
"explain_setting",
|
|
73
|
+
]
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def filter_bin_state(bcm, bpp, method, kstar1_range, kstar2_range):
    """Filter the output of bpp and bcm, where the kstar ranges
    have already been selected by the conv_select module

    Parameters
    ----------
    bcm : `pandas.DataFrame`
        bcm dataframe

    bpp : `pandas.DataFrame`
        bpp dataframe

    method : `dict`,
        one or more methods by which to filter the
        bpp or bcm table, e.g. ``{'binary_state' : [0,1]}``;
        This means you do *not* want to select the final state of the binaries in the bcm array

    kstar1_range : `list`
        list containing all kstar1 values to retain

    kstar2_range : `list`
        list containing all kstar2 values to retain

    Returns
    -------
    bcm : `pandas.DataFrame`
        filtered bcm dataframe

    bin_state_fraction : `pandas.DataFrame`
        single-row frame with the counts of selected systems in
        bin_state 0, 1, and 2 (before the bin_state cut is applied)
    """
    _known_methods = ["binary_state", "timestep_conditions"]

    if not set(method.keys()).issubset(set(_known_methods)):
        raise ValueError(
            "You have supplied an "
            "unknown method to filter out "
            "the bpp or bcm array. Known methods are "
            "{0}".format(_known_methods)
        )

    for meth, use in method.items():
        if meth == "binary_state":
            bin_num_save = []

            # in order to filter on binary state we need the last entry of the
            # bcm array for each binary
            bcm_last_entry = bcm.groupby("bin_num").last().reset_index()

            # disrupted (bin_state = 2) or still-bound (bin_state = 0) systems:
            # their present-day properties are simply their last bcm entry
            bcm_0_2 = bcm_last_entry.loc[(bcm_last_entry.bin_state != 1)]
            bin_num_save.extend(bcm_0_2.bin_num.tolist())

            # merged systems (bin_state = 1): the bcm reports only the
            # post-merge object, so these are matched through the bcm
            # merger_type code instead
            bcm_1 = bcm_last_entry.loc[bcm_last_entry.bin_state == 1]

            # merger_type encodes the two pre-merge kstars as zero-padded
            # two-digit strings, e.g. (13, 14) -> "1314"; build the codes for
            # both component orderings of the user-selected kstar ranges
            # (replaces two copy-pasted map/lambda/product constructions)
            merger_objects_to_track = [
                "{0}{1}".format(str(k_a).zfill(2), str(k_b).zfill(2))
                for k_a, k_b in itertools.chain(
                    itertools.product(kstar1_range, kstar2_range),
                    itertools.product(kstar2_range, kstar1_range),
                )
            ]
            bin_num_save.extend(
                bcm_1.loc[
                    bcm_1.merger_type.isin(merger_objects_to_track)
                ].bin_num.tolist()
            )

            bcm_last_entry = bcm_last_entry.loc[
                bcm_last_entry.bin_num.isin(bin_num_save)
            ]

            # this will tell us the binary state fraction of the systems with a
            # certain final kstar type before we throw out certain binary
            # states if a user requested that
            bin_state_fraction = bcm_last_entry.groupby("bin_state").tphys.count()
            bin_states = []
            for ii in range(3):
                try:
                    bin_states.append(bin_state_fraction.loc[ii])
                except Exception:
                    # no system ended in this bin_state
                    bin_states.append(0)
            bin_state_fraction = pd.DataFrame([bin_states], columns=[0, 1, 2])

            # keep only the binaries whose final bin_state is in the
            # user-supplied list
            bcm = bcm.loc[
                bcm.bin_num.isin(
                    bcm_last_entry.loc[bcm_last_entry.bin_state.isin(use)].bin_num
                )
            ]

    return bcm, bin_state_fraction
|
|
185
|
+
|
|
186
|
+
def conv_select_singles(bcm_save, bpp_save, final_kstar_1):
    """Pick out single stars whose kstar_1 matches the requested types
    and return their formation parameters.

    Parameters
    ----------
    bcm_save : `pandas.DataFrame`
        bcm dataframe (not referenced here; kept for a uniform signature)
    bpp_save : `pandas.DataFrame`
        bpp dataframe containing all saved bpp data
    final_kstar_1 : `list`
        kstar_1 values to retain

    Returns
    -------
    conv_save : `pandas.DataFrame`
        first matching bpp row per bin_num
    """
    matching = bpp_save.loc[bpp_save.kstar_1.isin(final_kstar_1)]
    # formation parameters = earliest retained bpp entry of each system
    return matching.groupby("bin_num").first().reset_index()
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def conv_select(bcm_save, bpp_save, final_kstar_1, final_kstar_2, method, conv_lims):
    """Select bcm data for special convergence cases

    Parameters
    ----------
    bcm_save : `pandas.DataFrame`
        bcm dataframe containing all saved bcm data

    bpp_save : `pandas.DataFrame`
        bpp dataframe containing all saved bpp data

    final_kstar_1 : `list`
        contains list of final primary kstars specified by user

    final_kstar_2 : `list`
        contains list of final secondary kstars specified by user

    method : `str`
        stage in binary evolution to check convergence for
        only one method may be supplied and they are specified
        in the inifile

    conv_lims : `dict`
        dictionary where keys are convergence params and the
        values are lists containing a [lo, hi] value to filter the
        convergence param between
        any non-specified convergence params will not be filtered

    Returns
    -------
    conv_save : `pandas.DataFrame`
        filtered dataframe containing binaries that fulfill
        user-specified convergence criteria

    conv_lims_bin_num : `pandas.Series`
        bin_nums of the conv_save rows that additionally pass the
        conv_lims cut (all bin_nums when conv_lims is empty)
    """
    _known_methods = [
        "formation",
        "1_SN",
        "2_SN",
        "disruption",
        "final_state",
        "XRB_form",
    ]

    if method not in _known_methods:
        raise ValueError(
            "You have supplied an "
            "unknown method to filter the "
            "bcm array for convergence. Known methods are "
            "{0}".format(_known_methods)
        )

    if method == "formation":
        # filter the bpp array to find the systems that match the user-specified
        # final kstars, allowing either component ordering
        conv_save = bpp_save.loc[
            ((bpp_save.kstar_1.isin(final_kstar_1))
             & (bpp_save.kstar_2.isin(final_kstar_2))
             )
            |
            ((bpp_save.kstar_1.isin(final_kstar_2))
             & (bpp_save.kstar_2.isin(final_kstar_1))
             )
        ]

        # select the formation parameters (the first bpp row of each binary)
        conv_save = conv_save.groupby("bin_num").first().reset_index()

    elif method == "1_SN":
        # select out the systems which will undergo a supernova
        # (evol_type 15/16 mark the first/second SN in the bpp array)
        conv_sn_ind = bpp_save.loc[bpp_save.evol_type.isin([15.0, 16.0])].bin_num

        # select out the systems which will produce the user specified final kstars
        # and undergo a supernova; sep > 0 keeps only still-bound rows
        conv_sn_ind = bpp_save.loc[
            (bpp_save.bin_num.isin(conv_sn_ind))
            & (bpp_save.kstar_1.isin(final_kstar_1))
            & (bpp_save.kstar_2.isin(final_kstar_2))
            & (bpp_save.sep > 0)
        ].bin_num

        # select out the values just before the supernova(e)
        conv_sn = bpp_save.loc[
            (bpp_save.bin_num.isin(conv_sn_ind))
            & (bpp_save.evol_type.isin([15.0, 16.0]))
        ]

        # make sure to select out only the first supernova
        conv_save = conv_sn.groupby("bin_num").first().reset_index()

    elif method == "2_SN":
        # select out the systems which will undergo a supernova
        conv_sn_ind = bpp_save.loc[bpp_save.evol_type.isin([15.0, 16.0])].bin_num

        # select out the systems which will produce the user specified final kstars
        # and undergo a supernova
        conv_sn_ind = bpp_save.loc[
            (bpp_save.bin_num.isin(conv_sn_ind))
            & (bpp_save.kstar_1.isin(final_kstar_1))
            & (bpp_save.kstar_2.isin(final_kstar_2))
            & (bpp_save.sep > 0)
        ].bin_num
        # select out the values just before the supernova(e)
        conv_sn = bpp_save.loc[
            (bpp_save.bin_num.isin(conv_sn_ind))
            & (bpp_save.evol_type.isin([15.0, 16.0]))
        ]

        # select out only the systems that go through 2 supernovae
        # NOTE(review): this boolean mask is indexed by bin_num, so it only
        # aligns correctly if conv_sn (i.e. bpp_save) is itself indexed by
        # bin_num — confirm against the caller
        conv_sn_2 = conv_sn.loc[conv_sn.groupby("bin_num").size() == 2]

        # make sure to select out only the second supernova
        conv_save = conv_sn_2.groupby("bin_num").nth(1).reset_index()

    elif method == "disruption":
        # filter the bpp array to find the systems that match the user-specified
        # final kstars
        conv_ind = bpp_save.loc[
            (bpp_save.kstar_1.isin(final_kstar_1))
            & (bpp_save.kstar_2.isin(final_kstar_2))
        ].bin_num.unique()

        conv_save = bpp_save.loc[(bpp_save.bin_num.isin(conv_ind))]

        # select out the parameters just before disruption
        # first reset the index:
        conv_save_reset = conv_save.reset_index()

        # next select out the index for the disrupted systems using evol_type == 11
        conv_save_reset_ind = conv_save_reset.loc[
            conv_save_reset.evol_type == 11.0
        ].index

        conv_save = conv_save_reset.iloc[conv_save_reset_ind]

    elif method == "final_state":
        # the bcm array is all that we need!
        conv_save = bcm_save

    elif method == "XRB_form":
        # select out the systems which undergo a SN
        conv_ind = bpp_save.loc[bpp_save.evol_type.isin([15.0, 16.0])].bin_num.unique()
        conv_sn = bpp_save.loc[bpp_save.bin_num.isin(conv_ind)]

        # select out systems when they first enter RLO after the 1st SN
        # (RRLO_2 >= 1 means the secondary fills its Roche lobe)
        conv_xrb = conv_sn.loc[
            (conv_sn.kstar_1.isin(final_kstar_1))
            & (conv_sn.kstar_2.isin(final_kstar_2))
            & (conv_sn.RRLO_2 >= 1.0)
            & (conv_sn.sep > 0)
        ]
        conv_save = conv_xrb.groupby("bin_num").first().reset_index()

    if conv_lims:
        # NOTE(review): each iteration overwrites conv_save_lim and
        # conv_lims_bin_num, so when several keys are supplied only the
        # *last* key's [lo, hi] window determines the returned bin_nums;
        # the filters are not combined — confirm this is intended
        for key in conv_lims.keys():
            filter_lo = conv_lims[key][0]
            filter_hi = conv_lims[key][1]
            conv_save_lim = conv_save.loc[conv_save[key] < filter_hi]
            conv_lims_bin_num = conv_save_lim.loc[conv_save[key] > filter_lo].bin_num
    else:
        conv_lims_bin_num = conv_save.bin_num

    return conv_save, conv_lims_bin_num
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
def pop_write(
    dat_store,
    log_file,
    mass_list,
    number_list,
    bcm,
    bpp,
    initC,
    conv,
    kick_info,
    bin_state_nums,
    match,
    idx,
    **kwargs,
):
    """Writes all the good stuff that you want to save from runFixedPop in a
    single function

    Parameters
    ----------
    dat_store : `pandas HDFStore`
        H5 file to write to
    log_file : `file write`
        log file to write to
    mass_list : `list`
        mass of the singles, mass of the binaries, and mass of the stars
    number_list : `list`
        number of singles, number of binaries, and number of stars
    bcm : `pandas.DataFrame`
        bcm array to write
    bpp : `pandas.DataFrame`
        bpp array to write
    initC : `pandas.DataFrame`
        initial-conditions array to write
    conv : `pandas.DataFrame`
        conv array to write
    kick_info : `pandas.DataFrame`
        kick_info array to write
    bin_state_nums : `list`
        contains the count of binstates 0,1,2
    match : `pandas.DataFrame`
        contains the match values for each conv_param
    idx : `int`
        index of the bcm so we can pick up where we left off
        if runFixedPop hits a wall time
    **kwargs
        optional singles tables, all `pandas.DataFrame`:
        ``conv_singles``, ``bcm_singles``, ``bpp_singles``,
        ``initC_singles``, ``kick_info_singles``

    Returns
    -------
    Nothing!
    """
    # sampled-mass and sampled-count tallies, appended pairwise per category
    mass_keys = ("mass_singles", "mass_binaries", "mass_stars")
    number_keys = ("n_singles", "n_binaries", "n_stars")
    for m_key, m_val, n_key, n_val in zip(mass_keys, mass_list, number_keys, number_list):
        dat_store.append(m_key, pd.DataFrame([m_val]))
        dat_store.append(n_key, pd.DataFrame([n_val]))
    log_file.write("The total mass sampled so far is: {0}\n".format(mass_list[2]))

    # evolution tables; initC is re-indexed so its index matches bin_num
    dat_store.append("bcm", bcm)
    dat_store.append("bpp", bpp)
    dat_store.append("initC", initC.set_index("bin_num", drop=False))
    dat_store.append("conv", conv)
    dat_store.append("kick_info", kick_info)

    # bookkeeping: bin-state counts, convergence matches, resume index
    dat_store.append("bin_state_nums", bin_state_nums)
    dat_store.append("match", match)
    dat_store.append("idx", pd.DataFrame([idx]))

    # single-star tables are only written when supplied
    if "conv_singles" in kwargs.keys():
        for singles_key in (
            "conv_singles",
            "bcm_singles",
            "bpp_singles",
            "initC_singles",
            "kick_info_singles",
        ):
            dat_store.append(singles_key, kwargs[singles_key])

    return
|
|
490
|
+
|
|
491
|
+
|
|
492
|
+
def a_from_p(p, m1, m2):
    """Compute the orbital separation from the period via Kepler's third law.

    Parameters
    ----------
    p : float/array
        orbital period [day]
    m1 : float/array
        primary mass [msun]
    m2 : float/array
        secondary mass [msun]

    Returns
    -------
    sep : float/array
        separation [rsun]
    """
    # in yr / msun / au units Kepler III reads a^3 = P^2 (M1 + M2)
    period_yr = p / 365.25
    sep_au = (period_yr ** 2 * (m1 + m2)) ** (1 / 3.0)
    # convert au -> rsun (1 au = 215.032 rsun, the file's adopted value)
    return sep_au * 215.032
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
def p_from_a(sep, m1, m2):
    """Computes the orbital period from the separation with Kepler III

    (Docstring corrected: this is the inverse of ``a_from_p``; the old
    summary described the wrong direction.)

    Parameters
    ----------
    sep : float/array
        separation [rsun]
    m1 : float/array
        primary mass [msun]
    m2 : float/array
        secondary mass [msun]

    Returns
    -------
    p : float/array
        orbital period [day]
    """
    # convert rsun -> au; in au / msun / yr units Kepler III is P^2 = a^3 / M
    sep_au = sep / 215.032
    p_2 = sep_au ** 3 / (m1 + m2)
    p_day = (p_2 ** 0.5) * 365.25
    return p_day
|
|
539
|
+
|
|
540
|
+
|
|
541
|
+
def calc_Roche_radius(M1, M2, A):
    """Roche lobe radius of star 1 from the Eggleton (1983) fit.

    Parameters
    ----------
    M1 : float
        Primary mass [any unit]
    M2 : float
        Secondary mass [any unit]
    A : float
        Orbital separation [any unit]

    Returns
    -------
    Roche radius : float
        in units of input 'A'
    """
    q = M1 / M2
    q_23 = q ** (2.0 / 3.0)
    # Eggleton (1983): R_L/A = 0.49 q^(2/3) / (0.6 q^(2/3) + ln(1 + q^(1/3)))
    return A * 0.49 * q_23 / (0.6 * q_23 + np.log(1.0 + q ** (1.0 / 3.0)))
|
|
565
|
+
|
|
566
|
+
|
|
567
|
+
def mass_min_max_select(kstar_1, kstar_2, **kwargs):
    """Select a minimum and maximum mass to filter out binaries in the initial
    parameter sample to reduce the number of unneccessary binaries evolved
    in BSE

    Parameters
    ----------
    kstar_1 : int, list
        BSE stellar type for the primary
        or minimum and maximum stellar types for the primary
    kstar_2 : int, list
        BSE stellar type for the secondary
        or minimum and maximum stellar types for the secondary

    Returns
    -------
    min_mass[0] : float
        minimum primary mass for initial sample
    max_mass[0] : float
        maximum primary mass for initial sample
    min_mass[1] : float
        minimum secondary mass for initial sample
    max_mass[1] : float
        maximum secondary mass for initial sample
    """
    # ZAMS-mass floor implied by the lowest requested final kstar
    # (values carried over from the original elif ladder)
    min_mass_for_kstar = {14.0: 8.0, 13.0: 3.0, 12.0: 1.0, 11.0: 0.8, 10.0: 0.5}
    # ZAMS-mass ceiling implied by the highest requested final kstar;
    # kstar 14 (BH) keeps the default ceiling
    max_mass_for_kstar = {13.0: 60.0, 12.0: 20.0, 11.0: 20.0, 10.0: 20.0}

    primary_max = kwargs.get("m_max", 150.0)
    secondary_max = kwargs.get("m_max", 150.0)

    primary_min = kwargs.get("m1_min", 0.08)
    secondary_min = kwargs.get("m2_min", 0.08)

    if (primary_min < 0.08) | (secondary_min < 0.08):
        warnings.warn("Tread carefully, BSE is not equipped to handle stellar masses less than 0.08 Msun!")
    if primary_max > 150:
        warnings.warn("Tread carefully, BSE is not equipped to handle stellar masses greater than 150 Msun! And to be honest, we are extrapolating beyond 50 Msun :-/")

    min_mass = [primary_min, secondary_min]
    max_mass = [primary_max, secondary_max]

    # a single supplied kstar means both ends of the range are that kstar;
    # otherwise use the extremes of the supplied list
    if len(kstar_1) == 1:
        kstar_1_lo, kstar_1_hi = kstar_1[0], kstar_1[0]
    else:
        kstar_1_lo, kstar_1_hi = min(kstar_1), max(kstar_1)

    if len(kstar_2) == 1:
        kstar_2_lo, kstar_2_hi = kstar_2[0], kstar_2[0]
    else:
        kstar_2_lo, kstar_2_hi = min(kstar_2), max(kstar_2)

    kstar_lo = [kstar_1_lo, kstar_2_lo]
    kstar_hi = [kstar_1_hi, kstar_2_hi]

    # tighten each component's mass bounds based on its kstar range
    for ii, k in enumerate(kstar_lo):
        if k in min_mass_for_kstar:
            min_mass[ii] = min_mass_for_kstar[k]

    for ii, k in enumerate(kstar_hi):
        if k in max_mass_for_kstar:
            max_mass[ii] = max_mass_for_kstar[k]

    return min_mass[0], max_mass[0], min_mass[1], max_mass[1]
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
def idl_tabulate(x, f, p=5):
    """Replicate IDL's int_tabulated: integrate tabulated data by applying
    a p-point Newton-Cotes rule to successive chunks.

    Parameters
    ----------
    x : array
        tabulated x-value data
    f : array
        tabulated f-value data, same size as x
    p : int
        number of chunks to divide tabulated data into
        Default: 5

    Returns
    -------
    ret : float
        Integration result
    """

    def _chunk_integral(xs, fs):
        # a chunk with fewer than two points spans no interval
        if xs.shape[0] < 2:
            return 0
        # rescale the abscissae onto [0, n-1] for the Newton-Cotes weights
        scaled = (xs.shape[0] - 1) * (xs - xs[0]) / (xs[-1] - xs[0])
        weights = scipy.integrate.newton_cotes(scaled)[0]
        return (xs[-1] - xs[0]) / (xs.shape[0] - 1) * np.dot(weights, fs)

    total = 0
    # consecutive chunks share an endpoint, hence the stride of p - 1
    for start in range(0, x.shape[0], p - 1):
        total += _chunk_integral(x[start: start + p], f[start: start + p])
    return total
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
def rndm(a, b, g, size):
    r"""Power-law generator for pdf(x)\propto x^{g} for a<=x<=b

    Parameters
    ----------
    a : float
        Minimum of range for power law
    b : float
        Maximum of range for power law
    g : float
        Index for power law
    size : int
        Number of data points to draw

    Returns
    -------
    power : array
        Array of data sampled from power law distribution with params
        fixed by inputs
    """
    # the inverse-CDF transform below divides by g + 1, so g = -1
    # (log-uniform case) is not supported
    if g == -1:
        raise ValueError("Power law index cannot be exactly -1")
    u = np.random.random(size=size)
    lo_pow = a ** (g + 1)
    hi_pow = b ** (g + 1)
    # invert the CDF: x = (a^{g+1} + u (b^{g+1} - a^{g+1}))^{1/(g+1)}
    return (lo_pow + (hi_pow - lo_pow) * u) ** (1.0 / (g + 1))
|
|
714
|
+
|
|
715
|
+
|
|
716
|
+
def param_transform(dat):
    """Rescale a data set onto the open interval (0, 1).

    Exact endpoint values are nudged inward so a subsequent logit
    transform stays finite.

    Parameters
    ----------
    dat : array
        array of data to transform between 0 and 1

    Returns
    -------
    datTransformed : array
        array of data with limits between 0 and 1
    """
    lo = min(dat)
    hi = max(dat)
    scaled = (dat - lo) / ((hi - lo))
    # pull exact 1.0 / 0.0 values slightly inside the interval
    if np.max(scaled) == 1.0:
        scaled[scaled == 1.0] = 1 - 1e-6
    if np.min(scaled) == 0.0:
        scaled[scaled == 0.0] = 1e-6
    return scaled
|
|
741
|
+
|
|
742
|
+
|
|
743
|
+
def dat_transform(dat, dat_list):
    """Rescale selected columns onto (0, 1) with param_transform, then map
    them to log space with the logit function (KDE pre-processing).

    Parameters
    ----------
    dat : DataFrame
        Data to transform to eventually perform KDE
    dat_list : list
        List of DataFrame columns to include in transformation

    Returns
    -------
    dat_trans : array
        Transformed data for columns in dat_list
    """
    transformed = [ss.logit(param_transform(dat[column])) for column in dat_list]
    return np.vstack([transformed])
|
|
766
|
+
|
|
767
|
+
|
|
768
|
+
def dat_un_transform(dat_sample, dat_set, dat_list):
    """Un-transform data that was transformed in dat_transform

    Parameters
    ----------
    dat_sample : array
        Data sampled from kde generated with transformed data
    dat_set : DataFrame
        Un-transformed data (same as dat in dat_transform)
    dat_list : list
        List of DataFrame columns to include in transformation

    Returns
    -------
    dat : array
        Array of data sampled from kde that is transformed back to
        bounds of the un-transformed data set the kde is generated from
    """
    # undo the logit first, then rescale each row back onto the
    # [min, max] range of the corresponding original column
    probs = ss.expit(dat_sample)
    restored = []
    for row, column in enumerate(dat_list):
        col_min = min(dat_set[column])
        col_max = max(dat_set[column])
        restored.append(probs[row, :] * (col_max - col_min) + col_min)
    return np.vstack(restored)
|
|
796
|
+
|
|
797
|
+
|
|
798
|
+
def knuth_bw_selector(dat_list):
    """Selects the kde bandwidth using Knuth's rule implemented in Astropy
    If Knuth's rule raises error, Scott's rule is used

    Parameters
    ----------
    dat_list : list
        List of data arrays that will be used to generate a kde

    Returns
    -------
    bw : float
        Mean of the bandwidths for all of the data arrays in dat_list
        (the previous docstring incorrectly said the minimum was returned)
    """

    bw_list = []
    for dat in dat_list:
        try:
            bw = astrostats.knuth_bin_width(dat)
        except Exception:
            # Knuth's rule can fail (e.g. optimization does not converge);
            # fall back to the cheaper Scott rule for this array
            print("Using Scott Rule!!")
            bw = astrostats.scott_bin_width(dat)
        bw_list.append(bw)
    return np.mean(bw_list)
|
|
822
|
+
|
|
823
|
+
|
|
824
|
+
def get_Z_from_FeH(FeH, Z_sun=0.02):
    """Convert [Fe/H] to metallicity Z, assuming every star has the same
    abundance pattern as the sun.

    Parameters
    ----------
    FeH : array
        Fe/H values to convert
    Z_sun : float
        solar metallicity

    Returns
    -------
    Z : array
        metallicities corresponding to Fe/H
    """
    # Z = Z_sun * 10^[Fe/H], evaluated in log space
    log_Z = np.log10(Z_sun) + FeH
    return 10 ** log_Z
|
|
843
|
+
|
|
844
|
+
|
|
845
|
+
def get_FeH_from_Z(Z, Z_sun=0.02):
    """Convert metallicity Z to [Fe/H], assuming every star shares the
    solar abundance pattern.

    Parameters
    ----------
    Z : array
        metallicities to convert to Fe/H
    Z_sun : float
        solar metallicity

    Returns
    -------
    FeH : array
        Fe/H corresponding to metallicities
    """
    return np.log10(Z) - np.log10(Z_sun)
|
|
864
|
+
|
|
865
|
+
|
|
866
|
+
def get_binfrac_of_Z(Z):
    '''Theoretical binary fraction as a function of metallicity,
    following the two-piece linear fits of Moe+2019.

    Parameters
    ----------
    Z : array
        metallicity Z values

    Returns
    -------
    binfrac : array
        binary fraction values
    '''
    FeH = get_FeH_from_Z(Z)
    # split the sample at [Fe/H] = -1 and apply the appropriate linear fit
    # to each side; metal-poor values come first in the returned array
    metal_poor = np.where(FeH <= -1.0)
    metal_rich = np.where(FeH > -1.0)
    binfrac_poor = -0.0648 * FeH[metal_poor] + 0.3356
    binfrac_rich = -0.1977 * FeH[metal_rich] + 0.2025
    return np.append(binfrac_poor, binfrac_rich)
|
|
888
|
+
|
|
889
|
+
|
|
890
|
+
def get_porb_norm(Z, close_logP=4.0, wide_logP=6.0, binfrac_tot_solar=0.66, Z_sun=0.02):
    '''Returns normalization constants to produce log normals consistent with Fig 19 of Moe+19
    for the orbital period distribution

    Parameters
    ----------
    Z : array
        metallicity values
    close_logP : float
        dividing line between close and intermediate orbits
    wide_logP : float
        dividing line between intermediate and wide orbits; currently
        unused here, kept for interface symmetry with callers
    binfrac_tot_solar : float
        integrated total binary fraction at solar metallicity
    Z_sun : float
        solar metallicity

    Returns
    -------
    norm_wide : float
        normalization factor for kde for wide binaries
    norm_close : float
        normalization factor for kde for close binaries
    '''
    from scipy.stats import norm
    from scipy.integrate import trapezoid
    from scipy.interpolate import interp1d

    # fix the log10(P/day) grid to the limits used in Moe+19
    logP_lo_lim = 0
    logP_hi_lim = 9
    log_P = np.linspace(logP_lo_lim, logP_hi_lim, 10000)

    # fiducial log-normal orbital period distribution
    logP_pdf = norm.pdf(log_P, loc=4.9, scale=2.3)

    # wide binaries: normalize the full distribution to the total solar binary fraction
    norm_wide = binfrac_tot_solar / trapezoid(logP_pdf, log_P)

    # close binaries: build the metallicity-dependent close binary fraction
    # (two-piece linear fit in [Fe/H] from Moe+19) and interpolate it in Z
    FeHclose = np.linspace(-3.0, 0.5, 100)
    fclose = -0.0648 * FeHclose + 0.3356
    fclose[FeHclose > -1.0] = -0.1977 * FeHclose[FeHclose > -1.0] + 0.2025
    Zclose = get_Z_from_FeH(FeHclose, Z_sun=Z_sun)

    fclose_interp = interp1d(Zclose, fclose)

    fclose_Z = fclose_interp(Z)
    # normalize the sub-close_logP portion of the pdf to the close binary fraction
    norm_close = fclose_Z / trapezoid(logP_pdf[log_P < close_logP], log_P[log_P < close_logP])

    return norm_wide, norm_close
|
|
938
|
+
|
|
939
|
+
|
|
940
|
+
def get_met_dep_binfrac(met):
    '''Population-wide binary fraction consistent with Moe+19 for the
    supplied metallicity.

    Parameters
    ----------
    met : float
        metallicity of the population

    Returns
    -------
    binfrac : float
        binary fraction of the population based on metallicity, rounded
        to two decimal places
    '''
    from scipy.interpolate import interp1d
    from scipy.integrate import trapezoid
    from scipy.stats import norm

    # log10(P/day) limits and segment boundaries used by Moe+19
    logP_hi_lim = 9
    logP_lo_lim = 0
    wide_logP = 6
    close_logP = 4
    neval = 5000

    grid_close = np.linspace(logP_lo_lim, close_logP, neval)
    grid_mid = np.linspace(close_logP, wide_logP, neval)
    grid_wide = np.linspace(wide_logP, logP_hi_lim, neval)

    # scale the close and wide log-normal segments by their normalizations
    norm_wide, norm_close = get_porb_norm(met)
    prob_wide = norm.pdf(grid_wide, loc=4.9, scale=2.3) * norm_wide
    prob_close = norm.pdf(grid_close, loc=4.9, scale=2.3) * norm_close

    # bridge the gap between the two segments with a straight line
    slope = -(prob_close[-1] - prob_wide[0]) / (wide_logP - close_logP)
    prob_mid = slope * (grid_mid - close_logP) + prob_close[-1]
    prob_mid_interp = interp1d(grid_mid, prob_mid)

    # assemble the full piecewise pdf and integrate it
    x_dat = np.hstack([grid_close, grid_mid, grid_wide])
    y_dat = np.hstack([prob_close, prob_mid_interp(grid_mid), prob_wide])

    # rescale relative to the solar-metallicity total binary fraction (0.66)
    binfrac = trapezoid(y_dat, x_dat) / 0.66 * 0.5

    return float(np.round(binfrac, 2))
|
|
980
|
+
|
|
981
|
+
def error_check(BSEDict, filters=None, convergence=None, sampling=None):
    """Checks that values in BSEDict, filters, convergence, and sampling are viable

    Parameters
    ----------
    BSEDict : dict
        BSE flags and their user-supplied values
    filters : dict, optional
        filters section of the inifile
    convergence : dict, optional
        convergence section of the inifile
    sampling : dict, optional
        sampling section of the inifile

    Raises
    ------
    ValueError
        If any supplied value is missing, has the wrong type, or lies
        outside the valid range defined in ``cosmic-settings.json``
    """
    if not isinstance(BSEDict, dict):
        raise ValueError("BSE flags must be supplied via a dictionary")

    if filters is not None:
        if not isinstance(filters, dict):
            raise ValueError("Filters criteria must be supplied via a dictionary")
        for option in ["binary_state", "timestep_conditions"]:
            if option not in filters.keys():
                raise ValueError(
                    "Inifile section filters must have option {0} supplied".format(
                        option
                    )
                )

    if convergence is not None:
        if not isinstance(convergence, dict):
            raise ValueError("Convergence criteria must be supplied via a dictionary")
        for option in [
            "pop_select",
            "convergence_params",
            "convergence_limits",
            "match",
            "apply_convergence_limits",
        ]:
            if option not in convergence.keys():
                raise ValueError(
                    "Inifile section convergence must have option {0} supplied".format(
                        option
                    )
                )

    if sampling is not None:
        if not isinstance(sampling, dict):
            raise ValueError("Sampling criteria must be supplied via a dictionary")
        for option in ["sampling_method", "SF_start", "SF_duration", "metallicity", "keep_singles"]:
            if option not in sampling.keys():
                raise ValueError(
                    "Inifile section sampling must have option {0} supplied".format(
                        option
                    )
                )
        if ("qmin" not in sampling.keys()) & ("m2_min" not in sampling.keys()) & (sampling["sampling_method"] == 'independent'):
            raise ValueError("You have not specified qmin or m2_min. At least one of these must be specified.")
    # filters
    if filters is not None:
        flag = "binary_state"
        if any(x not in [0, 1, 2] for x in filters[flag]):
            raise ValueError(
                "{0} needs to be a subset of [0,1,2] (you set it to {1})".format(
                    flag, filters[flag]
                )
            )
        flag = "timestep_conditions"
        if (type(filters[flag]) != str) and (type(filters[flag]) != list):
            raise ValueError(
                "{0} needs to either be a string like 'dtp=None' or a list of conditions like [['binstate==0', 'dtp=1.0']] (you set it to {1})".format(
                    flag, filters[flag]
                )
            )

    # convergence
    if convergence is not None:
        flag = "convergence_limits"
        if convergence[flag]:
            # BUGFIX: previously this iterated zip(convergence.items(),
            # convergence.keys()), so `item` was always a (key, value)
            # 2-tuple and the length check could never fire; the `{1:i}`
            # format spec would also have crashed had it been reached.
            # Check the limit pairs inside convergence_limits instead.
            for key, limits in convergence[flag].items():
                if len(limits) != 2:
                    raise ValueError(
                        "The value for key '{0:s}' needs to be a list of length 2, it is length: {1:d}".format(
                            key, len(limits)
                        )
                    )
        flag = "pop_select"
        if not convergence[flag] in [
            "formation",
            "1_SN",
            "2_SN",
            "disruption",
            "final_state",
            "XRB_form",
        ]:
            raise ValueError(
                "{0} needs to be in the list: ['formation', '1_SN', '2_SN', 'disruption', 'final_state', 'XRB_form'] "
                "(you set it to {1})".format(
                    flag, convergence[flag]
                )
            )

        flag = "match"
        if not isinstance(convergence[flag], float):
            raise ValueError(
                "{0} must be a float (you set it to {1})".format(
                    flag, convergence[flag]
                )
            )

        flag = "convergence_params"
        acceptable_convergence_params = [
            "mass_1",
            "mass_2",
            "sep",
            "porb",
            "ecc",
            "massc_1",
            "massc_2",
            "rad_1",
            "rad_2",
        ]
        for param in convergence[flag]:
            if param not in acceptable_convergence_params:
                raise ValueError(
                    "Supplied convergence parameter {0} is not in list of "
                    "acceptable convergence parameters {1}".format(
                        param, acceptable_convergence_params
                    )
                )

        flag = "convergence_limits"
        if type(convergence[flag]) != dict:
            raise ValueError(
                "Supplied convergence limits must be passed as a dict "
                "(you passed type {0})".format(type(convergence[flag]))
            )

        # every limited quantity must also be a tracked convergence parameter
        for key in convergence[flag].keys():
            if key not in convergence["convergence_params"]:
                raise ValueError(
                    "Supplied convergence limits must correspond to already "
                    "supplied convergence_params. The supplied convergence_params "
                    "are {0}, while you supplied {1}".format(
                        convergence["convergence_params"], key)
                )
        flag = "apply_convergence_limits"
        if type(convergence[flag]) != bool:
            raise ValueError(
                "apply_convergence_limits must be either True or False, "
                "you supplied {}".format(
                    convergence[flag])
            )

    # sampling
    if sampling is not None:
        flag = "sampling_method"
        acceptable_sampling = ["multidim", "independent"]
        if sampling[flag] not in acceptable_sampling:
            raise ValueError(
                "sampling_method must be one of {0} you supplied {1}.".format(
                    acceptable_sampling, sampling[flag]
                )
            )

        flag = "metallicity"
        if not isinstance(sampling[flag], float):
            raise ValueError(
                "{0} must be a float (you set it to {1})".format(flag, sampling[flag])
            )
        # BUGFIX: the message previously said "greater than or equal to 0"
        # while the condition rejects 0 itself; metallicity must be positive
        if sampling[flag] <= 0:
            raise ValueError(
                "{0} needs to be greater than 0 (you set it to {1})".format(
                    flag, sampling[flag]
                )
            )

    # use the cosmic-settings.json file to define the valid ranges for BSE flags
    settings_path = io_files("cosmic.data").joinpath('cosmic-settings.json')
    settings = json.loads(settings_path.read_text(encoding='utf-8'))

    # array-valued flags get bespoke shape/range checks below
    handle_separately = ['qcrit_array', 'natal_kick_array', 'fprimc_array']

    # go through the different categories in the settings file
    for cat in settings:
        # ignore anything that's not BSE
        if cat['category'] != "bse":
            continue

        # go through each flag in the settings
        for flag in cat['settings']:
            # if the user has provided it
            if flag['name'] in BSEDict and flag['name'] not in handle_separately:
                user_val = BSEDict[flag['name']]

                # track the valid options and whether the flag has matched any of them
                options = [o["name"] for o in flag["options"]]
                flag_is_valid = False

                # check each option
                for opt in options:
                    # for strings, we do something more complex
                    if isinstance(opt, str):
                        if opt == "positive values" and user_val > 0:
                            flag_is_valid = True
                            break
                        elif opt == "negative values" and user_val < 0:
                            flag_is_valid = True
                            break
                        # for things of the form "range [a,b)"
                        elif opt.startswith("range"):
                            # strip to just the brackets
                            r = opt[5:].strip()

                            # get the brackets and ensure the format is correct
                            start_brac, end_brac = r[0], r[-1]
                            if start_brac not in ["[", "("] or end_brac not in ["]", ")"] or ',' not in r:
                                raise ValueError(
                                    f"Range option for {flag['name']} is not formatted correctly."
                                )
                            # get the range value and check if the user value is in range
                            r_lo, r_hi = map(float, r[1:-1].split(","))
                            lower_ok = user_val > r_lo if start_brac == "(" else user_val >= r_lo
                            upper_ok = user_val < r_hi if end_brac == ")" else user_val <= r_hi
                            if lower_ok and upper_ok:
                                flag_is_valid = True
                                break
                    # otherwise, just do a direct comparison
                    elif user_val == opt:
                        flag_is_valid = True
                        break

                # if we didn't find a match, raise an error
                if not flag_is_valid:
                    raise ValueError(
                        f"{flag['name']} must be one of {options} (you set it to '{user_val}')"
                    )

    if "dtp" in BSEDict.keys():
        if BSEDict["dtp"] < 0:
            raise ValueError(
                f"dtp needs to be greater than or equal to 0 (you set it to '{BSEDict['dtp']:0.2f}')"
            )

    if "kickflag" in BSEDict.keys():
        if BSEDict["kickflag"] in [-1, -2] and ((BSEDict['ecsn'] != 2.25) or (BSEDict['ecsn_mlow'] != 1.6)):
            warnings.warn("You have chosen a kick flag that assumes compact object formation "
                          "according to Giacobbo & Mapelli 2020, but supplied electron "
                          "capture SN (ECSN) flags that are inconsistent with this study. "
                          "To maintain consistency, COSMIC will update your "
                          "ECSN flags to be ecsn=2.25 and ecsn_mlow=1.6")
            BSEDict['ecsn'] = 2.25
            BSEDict['ecsn_mlow'] = 1.6

    if "ecsn_mlow" in BSEDict.keys() and "ecsn" in BSEDict.keys():
        if BSEDict["ecsn_mlow"] > BSEDict["ecsn"]:
            raise ValueError(
                f"`ecsn_mlow` needs to be less than `ecsn`, (you set `ecsn_mlow` to {BSEDict['ecsn_mlow']} "
                f"and `ecsn` to {BSEDict['ecsn']})"
            )

    # ensure the natal kick array is the correct shape and each value is in the valid range
    if "natal_kick_array" in BSEDict.keys():
        shape = np.array(BSEDict["natal_kick_array"]).shape
        if shape != (2, 5):
            raise ValueError(
                f"'natal_kick_array' must have shape (2,5) (you supplied list, or array with shape '{shape}')"
            )

        valid_ranges = [
            (0, np.inf),         # velocity magnitude
            (-90, 90),           # polar angle
            (0, 360),            # azimuthal angle
            (0, 360),            # mean anomaly
            (-np.inf, np.inf)    # random seed
        ]

        for i in range(2):
            for j in range(5):
                val = BSEDict["natal_kick_array"][i][j]
                low, high = valid_ranges[j]
                # -100.0 is the sentinel meaning "draw this value randomly"
                if not (low <= val <= high) and val != -100.0:
                    raise ValueError(
                        f"Value at position ({i},{j}) in 'natal_kick_array' must be in range [{low}, {high}] "
                        f"(you set it to '{val}')"
                    )

    if "fprimc_array" in BSEDict.keys():
        if np.any(np.array(BSEDict["fprimc_array"]) < 0.0) or len(BSEDict["fprimc_array"]) != 16:
            # BUGFIX: removed a stray "]" that previously appeared in the message
            raise ValueError(
                f"fprimc_array values must be >= 0 and there must be 16 values "
                f'(you set them to {BSEDict["fprimc_array"]}, length={len(BSEDict["fprimc_array"])})'
            )

    if "qcrit_array" in BSEDict.keys():
        if np.any(np.array(BSEDict["qcrit_array"]) < 0.0) or len(BSEDict["qcrit_array"]) != 16:
            # BUGFIX: removed a stray "]" that previously appeared in the message
            raise ValueError(
                f"qcrit_array values must be >= 0 and there must be 16 values "
                f'(you set them to {BSEDict["qcrit_array"]}, length={len(BSEDict["qcrit_array"])})'
            )

    return
|
|
1271
|
+
|
|
1272
|
+
|
|
1273
|
+
def check_initial_conditions(full_initial_binary_table):
    """Checks initial conditions and reports warnings

    Only warning provided right now is if star begins in Roche lobe
    overflow
    """

    def rzamsf(m):
        """A function to evaluate Rzams
        ( from Tout et al., 1996, MNRAS, 281, 257 ).
        """
        # NOTE: `a` is the metallicity-dependent coefficient array bound by
        # the zcnsts(...) call below; this closure is only invoked after
        # that assignment, so the late binding is intentional.
        mx = np.sqrt(m)
        rzams = (
            (a[7] * m ** 2 + a[8] * m ** 6) * mx
            + a[9] * m ** 11
            + (a[10] + a[11] * mx) * m ** 19
        ) / (a[12] + a[13] * m ** 2 + (a[14] * m ** 8 + m ** 18 + a[15] * m ** 19) * mx)

        return rzams

    # drop single stars: only rows with two positive masses and a positive
    # orbital period are genuine binaries worth checking for RLOF
    no_singles = ((full_initial_binary_table["mass_1"] > 0.0)
                  & (full_initial_binary_table["mass_2"] > 0.0)
                  & (full_initial_binary_table["porb"] > 0.0))
    initial_binary_table = full_initial_binary_table[no_singles]

    # metallicity-dependent stellar structure constants; `a` feeds rzamsf above
    z = np.asarray(initial_binary_table["metallicity"])
    zpars, a = zcnsts(z)

    mass1 = np.asarray(initial_binary_table["mass_1"])
    mass2 = np.asarray(initial_binary_table["mass_2"])

    # nothing to check if no secondaries remain (also true for an empty table,
    # since np.all of an empty array is True)
    if np.all(mass2 == 0.0):
        return
    else:
        # ZAMS radii of both components
        rzams1 = rzamsf(mass1)
        rzams2 = rzamsf(mass2)

        # assume some time step in order to calculate sep
        # yeardy: days per year; aursun: AU expressed in solar radii —
        # presumably porb is in days and sep comes out in Rsun (Kepler III)
        yeardy = 365.24
        aursun = 214.95
        tb = np.asarray(initial_binary_table["porb"]) / yeardy
        sep = aursun * (tb * tb * (mass1 + mass2)) ** (1.0 / 3.0)

        # Roche lobe radii of each star about its companion
        rol1 = calc_Roche_radius(mass1, mass2, sep)
        rol2 = calc_Roche_radius(mass2, mass1, sep)

        # check for a ZAMS that starts in RFOL
        # (only kstar == 1, i.e. MS stars above 0.7 Msun, are checked)
        mask = ((np.array(initial_binary_table["kstar_1"]) == 1) & (rzams1 >= rol1)) | (
            (initial_binary_table["kstar_2"] == 1) & (rzams2 >= rol2)
        )
        if mask.any():
            warnings.warn(
                "At least one of your initial binaries is starting in Roche Lobe Overflow:\n{0}".format(
                    initial_binary_table[mask]
                )
            )

    return
|
|
1331
|
+
|
|
1332
|
+
|
|
1333
|
+
def convert_kstar_evol_type(bpp):
    """Toggle integer-coded columns and their string counterparts

    The underlying fortran code relies on integers to indicate
    things like the evolutionary stage of the star as well as
    key moments in its evolutionary track. Passing the data frame
    returned from running

    ```Evolve.evolve```

    through this function converts the ``kstar_1``, ``kstar_2``, and
    ``evol_type`` columns between those integer proxies and their
    astrophysical meaning: numeric columns become strings, string
    columns are mapped back to integers.
    """
    kstar_int_to_string_dict = {
        0: "Main Sequence (MS), < 0.7 M⊙",
        1: "MS, > 0.7 M⊙",
        2: "Hertzsprung Gap",
        3: "First Giant Branch",
        4: "Core Helium Burning",
        5: "Early Asymptotic Giant Branch (AGB)",
        6: "Thermally Pulsing AGB",
        7: "Naked Helium Star MS",
        8: "Naked Helium Star Hertzsprung Gap",
        9: "Naked Helium Star Giant Branch",
        10: "Helium White Dwarf",
        11: "Carbon/Oxygen White Dwarf",
        12: "Oxygen/Neon White Dwarf",
        13: "Neutron Star",
        14: "Black Hole",
        15: "Massless Remnant",
    }

    evolve_type_int_to_string_dict = {
        1: "initial state",
        2: "kstar change",
        3: "begin Roche lobe overflow",
        4: "end Roche lobe overlow",
        5: "contact",
        6: "coalescence",
        7: "begin common envelope",
        8: "end common envelope",
        9: "no remnant leftover",
        10: "max evolution time",
        11: "binary disruption",
        12: "begin symbiotic phase",
        13: "end symbiotic phase",
        14: "blue straggler",
        15: "supernova of primary",
        16: "supernova of secondary",
        100: "RLOF interpolation timeout error"
    }

    # both kstar columns share one mapping; evol_type has its own
    column_maps = {
        "kstar_1": kstar_int_to_string_dict,
        "kstar_2": kstar_int_to_string_dict,
        "evol_type": evolve_type_int_to_string_dict,
    }

    for column, int_to_string in column_maps.items():
        if bpp[column].dtype in [int, float]:
            # numeric column: cast to int, then look up the label
            bpp[column] = bpp[column].astype(int)
            bpp[column] = bpp[column].apply(lambda x, m=int_to_string: m[x])
        else:
            # string column: invert the mapping and look up the integer
            string_to_int = {v: k for k, v in int_to_string.items()}
            bpp[column] = bpp[column].apply(lambda x, m=string_to_int: m[x])

    return bpp
|
|
1420
|
+
|
|
1421
|
+
|
|
1422
|
+
def parse_inifile(inifile):
    """Provides a method for parsing the inifile and returning dicts of each section

    Parameters
    ----------
    inifile : str
        path to the ini configuration file

    Returns
    -------
    BSEDict : dict
        contents of the ``[bse]`` section
    seed_int : int
        random seed from the ``[rand_seed]`` section
    filters : dict or int
        contents of ``[filters]`` (0 when a ``[cmc]`` section replaces it)
    convergence : dict or int
        contents of ``[convergence]`` (0 when a ``[cmc]`` section replaces it)
    sampling : dict or int
        contents of ``[sampling]`` (0 when a ``[cmc]`` section replaces it)

    Raises
    ------
    ValueError
        If no inifile is given, it does not exist, or an option fails to parse
    """
    if inifile is None:
        raise ValueError("Please supply an inifile")
    elif not os.path.isfile(inifile):
        raise ValueError("inifile supplied does not exist")

    # binary AST operators we allow when evaluating option values
    binOps = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.Mod: operator.mod,
    }

    def arithmetic_eval(s):
        """Allows us to control how the strings from the inifile get parsed

        Safely evaluates literals, lists, simple arithmetic, and the
        constants True/False/None without calling the builtin ``eval``.
        """
        node = ast.parse(s, mode="eval")

        def _eval(node):
            """Different strings receive different evaluation"""
            if isinstance(node, ast.Expression):
                return _eval(node.body)
            elif isinstance(node, ast.Str):
                return node.s
            elif isinstance(node, ast.Num):
                return node.n
            elif isinstance(node, ast.BinOp):
                # only the whitelisted operators in binOps are supported
                return binOps[type(node.op)](_eval(node.left), _eval(node.right))
            elif isinstance(node, ast.List):
                return [_eval(x) for x in node.elts]
            elif isinstance(node, ast.Name):
                # bare identifiers: map True/False/None to their constants,
                # anything else is returned as a plain string
                result = VariableKey(item=node)
                constants_lookup = {
                    "True": True,
                    "False": False,
                    "None": None,
                }
                value = constants_lookup.get(
                    result.name,
                    result,
                )
                if type(value) == VariableKey:
                    # return regular string
                    return value.name
                else:
                    # return special string like True or False
                    return value
            elif isinstance(node, ast.NameConstant):
                # None, True, False are nameconstants in python3, but names in 2
                return node.value
            else:
                raise Exception("Unsupported type {}".format(node))

        return _eval(node.body)

    # ---- Create configuration-file-parser object and read parameters file.
    cp = ConfigParser()
    # preserve the case of option names (default lowercases them)
    cp.optionxform = str
    cp.read(inifile)

    # ---- Read needed variables from the inifile
    dictionary = {}
    for section in cp.sections():
        # for cosmic we skip any CMC stuff
        # if "cmc" is a section in the ini file, then we can optionally skip the
        # COSMIC population sections (or not, if they exist)
        if section == "cmc":
            # fill any missing COSMIC sections with placeholder values
            if "rand_seed" not in dictionary.keys():
                dictionary["rand_seed"] = {}
                dictionary["rand_seed"]["seed"] = 0
            if "filters" not in dictionary.keys():
                dictionary["filters"] = 0
            if "convergence" not in dictionary.keys():
                dictionary["convergence"] = 0
            if "sampling" not in dictionary.keys():
                dictionary["sampling"] = 0
            continue
        dictionary[section] = {}
        for option in cp.options(section):
            opt = cp.get(section, option)
            if "\n" in opt:
                raise ValueError("We have detected an error in your inifile. A parameter was read in with the following "
                                 "value: {0}. Likely, you have an unexpected syntax, such as a space before an parameter/option (i.e. "
                                 "the parameter must be flush to the far left of the file".format(opt))
            # first try the restricted AST evaluation, then fall back to JSON;
            # the finally-clause verifies that one of the two succeeded
            try:
                dictionary[section][option] = arithmetic_eval(opt)
            except Exception:
                dictionary[section][option] = json.loads(opt)
            finally:
                if option not in dictionary[section].keys():
                    raise ValueError("We have detected an error in your inifile. The folloiwng parameter failed to be read correctly: {0}".format(option))

    BSEDict = dictionary["bse"]
    seed_int = int(dictionary["rand_seed"]["seed"])
    filters = dictionary["filters"]
    convergence = dictionary["convergence"]
    sampling = dictionary["sampling"]

    return BSEDict, seed_int, filters, convergence, sampling
|
|
1522
|
+
|
|
1523
|
+
|
|
1524
|
+
def explain_setting(setting):
    """Provides explanation for a BSE setting from the cosmic-settings.json file

    Prints the setting's description and its valid options (with the
    default highlighted) to stdout.

    Parameters
    ----------
    setting : str
        Name of BSE setting to explain

    Raises
    ------
    ValueError
        If the setting name is not found in the settings file
    """
    # use the cosmic-settings.json file to define the valid ranges for BSE flags
    settings_path = io_files("cosmic.data").joinpath('cosmic-settings.json')
    settings = json.loads(settings_path.read_text(encoding='utf-8'))

    def _strip_tags(text):
        """Drop HTML tags and unescape ampersands in a description string."""
        # BUGFIX: previously this was a no-op `.replace("&", "&")`;
        # the intent is to unescape HTML-encoded ampersands
        return re.sub(r'<[^>]+>', '', text).replace("&amp;", "&")

    # ANSI escape codes for terminal highlighting
    BOLD = '\033[1m'
    GREEN = '\033[92m'
    END = '\033[0m'

    for cat in settings:
        # ignore anything that's not BSE
        if cat['category'] != "bse":
            continue

        # go through each flag in the settings
        for flag in cat['settings']:
            if flag['name'] == setting:
                print(f"\n{BOLD}{flag['name']}{END}")
                print("-" * len(flag['name']))
                print(f"{_strip_tags(flag['description'])}")
                print("\nValid options (default marked in green and with *):")
                for opt in flag['options']:
                    print(f"  {f'{GREEN}*' if 'default' in opt else '-'} {opt['name']}: {_strip_tags(opt['description'])}{END}")
                return

    raise ValueError(f"Unknown setting '{setting}'")
|
|
1559
|
+
|
|
1560
|
+
|
|
1561
|
+
class VariableKey:
    """
    A dictionary key which is a variable.
    @ivar item: The variable AST object.
    """

    def __init__(self, item):
        # remember only the identifier of the AST ``Name`` node
        self.name = item.id

    def __eq__(self, other):
        # equal only to other VariableKey instances with the same name
        return other.__class__ == self.__class__ and other.name == self.name

    def __hash__(self):
        # hash on the name so equal keys collide in dicts/sets
        return hash(self.name)
|