gammapbh 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gammapbh/__init__.py +5 -0
- gammapbh/__main__.py +7 -0
- gammapbh/cli.py +1396 -0
- gammapbh-1.1.0.dist-info/METADATA +198 -0
- gammapbh-1.1.0.dist-info/RECORD +9 -0
- gammapbh-1.1.0.dist-info/WHEEL +5 -0
- gammapbh-1.1.0.dist-info/entry_points.txt +2 -0
- gammapbh-1.1.0.dist-info/licenses/LICENSE.md +1348 -0
- gammapbh-1.1.0.dist-info/top_level.txt +1 -0
gammapbh/cli.py
ADDED
@@ -0,0 +1,1396 @@
# src/gammapbh/cli.py

import sys
import os
import re
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.special import erf
from scipy.interpolate import RectBivariateSpline
from scipy.integrate import trapezoid
from types import SimpleNamespace
from colorama import Fore, Style

# ---------------------------
# Matplotlib/NumPy basics
# ---------------------------
plt.rcParams.update({'font.size': 12})
np.seterr(divide='ignore', invalid='ignore')  # suppress log/invalid warnings

# ---------------------------
# Paths (package-internal only)
# ---------------------------
def _resolve_data_dir() -> str:
    """
    Where the BlackHawk tables live.
    Only use the copy shipped inside the package.
    """
    pkg_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(pkg_dir, "blackhawk_data")


def _resolve_results_root() -> str:
    """
    Where to write results.
    Only use the results folder inside the package.
    """
    pkg_dir = os.path.dirname(os.path.abspath(__file__))
    dest = os.path.join(pkg_dir, "results")
    os.makedirs(dest, exist_ok=True)
    # optional: quick writability check
    try:
        test = os.path.join(dest, ".writetest.tmp")
        with open(test, "w") as fh:
            fh.write("ok")
        os.remove(test)
    except Exception as e:
        raise RuntimeError(f"Results directory is not writable: {dest}\n{e}")
    return dest


DATA_DIR = _resolve_data_dir()
RESULTS_DIR = _resolve_results_root()

MONO_RESULTS_DIR = os.path.join(RESULTS_DIR, "monochromatic")
CUSTOM_RESULTS_DIR = os.path.join(RESULTS_DIR, "custom_equation")
GAUSS_RESULTS_DIR = os.path.join(RESULTS_DIR, "gaussian")
NGAUSS_RESULTS_DIR = os.path.join(RESULTS_DIR, "non_gaussian")
LOGN_RESULTS_DIR = os.path.join(RESULTS_DIR, "lognormal")

for d in (MONO_RESULTS_DIR, CUSTOM_RESULTS_DIR, GAUSS_RESULTS_DIR, NGAUSS_RESULTS_DIR, LOGN_RESULTS_DIR):
    os.makedirs(d, exist_ok=True)


# ---------------------------
# Labels
# ---------------------------
GAUSSIAN_METHOD = "Gaussian collapse"
NON_GAUSSIAN_METHOD = "Non-Gaussian Collapse"
LOGNORMAL_METHOD = "Log-Normal Distribution"

# ---------------------------
# Helper: required files in each mass folder
# ---------------------------
REQUIRED_FILES = [
    "instantaneous_primary_spectra.txt",
    "instantaneous_secondary_spectra.txt",
    "inflight_annihilation_prim.txt",
    "inflight_annihilation_sec.txt",
    "final_state_radiation_prim.txt",
    "final_state_radiation_sec.txt",
]

# ---------------------------
# Back navigation support
# ---------------------------
class BackRequested(Exception):
    """Raised when the user enters 'b' or 'back' to return to the prior screen."""
    pass

def discover_mass_folders(data_dir):
    masses, names = [], []
    try:
        for name in os.listdir(data_dir):
            p = os.path.join(data_dir, name)
            if not os.path.isdir(p):
                continue
            try:
                m = float(name)
            except ValueError:
                continue
            if all(os.path.isfile(os.path.join(p, f)) for f in REQUIRED_FILES):
                masses.append(m); names.append(name)
    except FileNotFoundError:
        return [], []
    if not masses:
        return [], []
    order = np.argsort(masses)
    return [float(masses[i]) for i in order], [names[i] for i in order]

# ---------------------------
# CLI + parsing helpers
# ---------------------------
def info(msg): print(Fore.CYAN + "ℹ " + msg + Style.RESET_ALL)
def warn(msg): print(Fore.YELLOW + "⚠ " + msg + Style.RESET_ALL)
def err(msg): print(Fore.RED + "✖ " + msg + Style.RESET_ALL)

def user_input(prompt, *, allow_back=False, allow_exit=True):
    """
    Wrapper for input() that allows:
      - 'b' or 'back' → raise BackRequested (to go up one menu)
      - 'q' or 'exit' → sys.exit(0)
    """
    txt = input(prompt).strip()
    low = txt.lower()
    if allow_exit and low in ('exit', 'q'):
        print("Exiting software.")
        sys.exit(0)
    if allow_back and low in ('b', 'back'):
        raise BackRequested()
    return txt

def list_saved_runs(base_dir):
    try:
        return sorted(d for d in os.listdir(base_dir) if os.path.isdir(os.path.join(base_dir, d)))
    except FileNotFoundError:
        return []

def snap_to_available(mval, available, tol=1e-12):
    log_m = np.log(mval)
    log_available = np.log(np.array(available))
    diffs = np.abs(log_available - log_m)
    idx = np.argmin(diffs)
    return available[idx] if diffs[idx] < tol else None
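
# Illustrative sketch (added for exposition; the grid values are hypothetical, not the
# shipped BlackHawk grid): a query mass is only "snapped" when it matches a tabulated
# mass in log space to within tol, otherwise None is returned and the caller falls back
# to spline interpolation.
def _demo_snap_to_available():
    demo_grid = [1e15, 2e15, 4e15]                         # hypothetical mass grid (g)
    assert snap_to_available(2e15, demo_grid) == 2e15      # exact log-space match
    assert snap_to_available(3e15, demo_grid) is None      # nothing within tol → interpolate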

def parse_float_list_verbose(s, *, name="value", bounds=None, allow_empty=False,
                             positive_only=False, strict_gt=False, strict_lt=False):
    if (s is None or s.strip() == ""):
        if not allow_empty:
            warn(f"No {name}s provided.")
        return []
    vals, seen = [], set()
    lo, hi = (bounds or (None, None))
    for tok in s.split(","):
        t = tok.strip()
        if not t: continue
        try:
            v = float(t)
        except Exception:
            warn(f"Skipping token '{t}': {name} is not a valid number."); continue
        if positive_only and v <= 0:
            warn(f"Skipping {name} {v:g}: must be > 0."); continue
        if lo is not None:
            if (strict_gt and not (v > lo)) or (not strict_gt and not (v >= lo)):
                cmp = ">" if strict_gt else "≥"
                warn(f"Skipping {name} {v:g}: must be {cmp} {lo:g}."); continue
        if hi is not None:
            if (strict_lt and not (v < hi)) or (not strict_lt and not (v <= hi)):
                cmp = "<" if strict_lt else "≤"
                warn(f"Skipping {name} {v:g}: must be {cmp} {hi:g}."); continue
        if v in seen:
            warn(f"Duplicate {name} {v:g}: keeping first, skipping this one."); continue
        vals.append(v); seen.add(v)
    if not vals and not allow_empty:
        warn(f"No usable {name}s parsed.")
    return vals

# ---------------------------
# PDFs (collapse space)
# ---------------------------
def delta_l(mass_ratio, kappa, delta_c, gamma):
    y = (mass_ratio / kappa)**(1.0 / gamma)
    arg = 64 - 96 * (delta_c + y)
    arg = np.clip(arg, 0.0, None)
    return (8 - np.sqrt(arg)) / 6

def mass_function(delta_l_val, sigma_x, delta_c, gamma):
    term1 = 1.0 / (np.sqrt(2 * np.pi) * sigma_x)
    term2 = np.exp(-delta_l_val**2 / (2 * sigma_x**2))
    term3 = delta_l_val - (3/8) * delta_l_val**2 - delta_c
    term4 = gamma * np.abs(1 - (3/4) * delta_l_val)
    return term1 * term2 * term3 / term4

def mass_function_exact(delta_l_val, sigma_X, sigma_Y, delta_c, gamma):
    # Biagetti et al. Eq. (20)
    A = sigma_X**2 + (sigma_Y * delta_l_val)**2
    exp_pref = np.exp(-1.0 / (2.0 * sigma_Y**2))
    term1 = 2.0 * sigma_Y * np.sqrt(A)
    inner_exp = np.exp(sigma_X**2 / (2.0 * sigma_Y**2 * (sigma_X**2 + 2.0 * (sigma_Y * delta_l_val)**2)))
    erf_arg = sigma_X * np.sqrt(2.0) / np.sqrt(A)  # stable
    term2 = np.sqrt(2.0 * np.pi) * sigma_X * inner_exp * erf(erf_arg)
    bracket = term1 + term2
    norm = exp_pref * sigma_X / (2.0 * np.pi * A**1.5)
    jacobian = ((delta_l_val - 0.375 * delta_l_val**2 - delta_c) /
                (gamma * np.abs(1.0 - 0.75 * delta_l_val)))
    return norm * bracket * jacobian

def mass_function_lognormal(x, mu, sigma):
    x_clipped = np.clip(x, 1e-16, None)
    return (1.0 / (x_clipped * sigma * np.sqrt(2 * np.pi))
            * np.exp(- (np.log(x_clipped) - mu)**2 / (2 * sigma**2)))
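
# Illustrative sketch: turning the collapse-space PDFs above into a discrete sampling
# distribution, mirroring the normalization step used in distributed_spectrum below.
# The σ value here is an arbitrary example, not a recommended setting.
def _demo_sample_gaussian_collapse(n_draws=5, sigma_x=0.1):
    x = np.linspace(0.001, 1.30909, 2000)                  # horizon mass-ratio grid
    mf = mass_function(delta_l(x, 3.3, 0.59, 0.36), sigma_x, 0.59, 0.36)
    mf = np.where(np.isfinite(mf) & (mf > 0), mf, 0.0)     # drop non-finite / negative weight
    prob = mf / mf.sum()                                   # normalize to probabilities
    return np.random.choice(x, size=n_draws, p=prob)       # sampled mass ratios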

# ---------------------------
# Data loaders
# ---------------------------
def load_data(filepath, skip_header=0):
    if not os.path.isfile(filepath):
        raise FileNotFoundError(f"File not found: {filepath}")
    return np.genfromtxt(filepath, skip_header=skip_header)

def load_spectra_components(directory):
    primary = load_data(os.path.join(directory, "instantaneous_primary_spectra.txt"), skip_header=2)[123:]
    secondary = load_data(os.path.join(directory, "instantaneous_secondary_spectra.txt"), skip_header=1)
    IFA_prim = load_data(os.path.join(directory, "inflight_annihilation_prim.txt"))
    IFA_sec = load_data(os.path.join(directory, "inflight_annihilation_sec.txt"))
    FSR_prim = load_data(os.path.join(directory, "final_state_radiation_prim.txt"), skip_header=1)
    FSR_sec = load_data(os.path.join(directory, "final_state_radiation_sec.txt"), skip_header=1)

    E_prim = primary[:,0] * 1e3  # MeV
    E_sec = secondary[:,0]       # MeV

    return {
        'energy_primary': E_prim,
        'energy_secondary': E_sec,
        'direct_gamma_primary': primary[:,1] / 1e3,
        'direct_gamma_secondary': secondary[:,1],
        'IFA_primary': np.interp(E_prim, IFA_prim[:,0], IFA_prim[:,1], left=0.0, right=0.0),
        'IFA_secondary': np.interp(E_sec, IFA_sec[:,0], IFA_sec[:,1], left=0.0, right=0.0),
        'FSR_primary': np.interp(E_prim, FSR_prim[:,0], FSR_prim[:,1]),
        'FSR_secondary': np.interp(E_sec, FSR_sec[:,0], FSR_sec[:,1]),
    }
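
# Illustrative sketch: loading the pre-rendered components for one mass folder and summing
# the pieces that already live on the primary energy grid. The folder name below is
# hypothetical; it must be one of the folders discovered under DATA_DIR.
def _demo_load_total_primary(folder_name="5.0e+15"):
    S = load_spectra_components(os.path.join(DATA_DIR, folder_name))
    E = S['energy_primary']                                # MeV
    total_primary = S['direct_gamma_primary'] + S['IFA_primary'] + S['FSR_primary']
    return E, total_primary                                # secondary-grid pieces are interpolated elsewhere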

# ---------------------------
# Monochromatic
# ---------------------------
def monochromatic_spectra():
    masses, names = discover_mass_folders(DATA_DIR)
    if not masses:
        warn(f"No valid mass folders found under: {DATA_DIR}")
        return
    MIN_MASS, MAX_MASS = min(masses), max(masses)

    try:
        masses_str = user_input(
            f"Enter PBH masses (g) to simulate (comma-separated; allowed range [{MIN_MASS:.2e}, {MAX_MASS:.2e}]): ",
            allow_back=True
        )
    except BackRequested:
        return

    mass_list = []
    if masses_str.strip():
        for tok in masses_str.split(','):
            t = tok.strip()
            if not t: continue
            try:
                mval = float(t)
            except Exception:
                warn(f"Skipping mass token '{t}': not a number."); continue
            if not (MIN_MASS <= mval <= MAX_MASS):
                warn(f"Skipping mass {mval:.3e} g: outside allowed range [{MIN_MASS:.2e}, {MAX_MASS:.2e}]."); continue
            mass_list.append(mval)
    if not mass_list:
        warn("No valid masses provided. Returning to menu."); return

    info("Pre-loading pre-rendered components …")
    first_S = load_spectra_components(os.path.join(DATA_DIR, names[0]))
    E_ref = first_S['energy_primary']
    N_E = len(E_ref)
    N_M = len(masses)

    direct_mat = np.zeros((N_M, N_E))
    secondary_mat = np.zeros((N_M, N_E))
    inflight_mat = np.zeros((N_M, N_E))
    finalstate_mat = np.zeros((N_M, N_E))
    Emax_ifa = np.zeros(N_M)

    for i, m in enumerate(masses):
        sub = os.path.join(DATA_DIR, names[i])
        S = load_spectra_components(sub)
        direct_mat[i] = S['direct_gamma_primary']
        secondary_mat[i] = np.interp(E_ref, S['energy_secondary'], S['direct_gamma_secondary'], left=0, right=0)
        inflight_mat[i] = S['IFA_primary'] + np.interp(E_ref, S['energy_secondary'], S['IFA_secondary'], left=0, right=0)
        finalstate_mat[i] = S['FSR_primary'] + np.interp(E_ref, S['energy_secondary'], S['FSR_secondary'], left=0, right=0)
        p = np.genfromtxt(os.path.join(sub, "inflight_annihilation_prim.txt"))
        s = np.genfromtxt(os.path.join(sub, "inflight_annihilation_sec.txt"))
        Emax_ifa[i] = max(p[:,0].max() if p.size else 0, s[:,0].max() if s.size else 0)

    logM_all = np.log(masses)
    logE = np.log(E_ref)
    tiny = 1e-300

    ld = np.log(np.where(direct_mat>tiny, direct_mat, tiny))
    ls = np.log(np.where(secondary_mat>tiny, secondary_mat, tiny))
    li = np.log(np.where(inflight_mat>tiny, inflight_mat, tiny))
    lf = np.log(np.where(finalstate_mat>tiny, finalstate_mat, tiny))

    spline_direct = RectBivariateSpline(logM_all, logE, ld, kx=1, ky=3, s=0)
    spline_secondary = RectBivariateSpline(logM_all, logE, ls, kx=1, ky=3, s=0)
    spline_inflight = RectBivariateSpline(logM_all, logE, li, kx=1, ky=3, s=0)
    spline_finalstate = RectBivariateSpline(logM_all, logE, lf, kx=1, ky=3, s=0)
    info("Built splines (linear in logM, cubic in logE).")

    all_data = []
    for mval in mass_list:
        snapped = snap_to_available(mval, masses)
        if snapped is not None:
            i = np.where(np.isclose(masses, snapped, rtol=0, atol=0))[0][0]
            kind = 'pre-rendered'
            d = direct_mat[i].copy()
            s = secondary_mat[i].copy()
            it = inflight_mat[i].copy()
            f = finalstate_mat[i].copy()
        else:
            kind = 'interpolated'
            idx_up = int(np.searchsorted(masses, mval, side='left'))
            idx_low = max(0, idx_up-1)
            idx_up = min(idx_up, N_M-1)
            Ecut = min(Emax_ifa[idx_low], Emax_ifa[idx_up])
            logm = np.log(mval)
            d = np.exp(spline_direct(logm, logE, grid=False))
            s = np.exp(spline_secondary(logm, logE, grid=False))
            it = np.exp(spline_inflight(logm, logE, grid=False))
            f = np.exp(spline_finalstate(logm, logE, grid=False))
            # guard tails in inflight
            for k in range(len(it)-1,0,-1):
                if np.isclose(it[k], it[k-1], rtol=1e-8): it[k] = 0.0
                else: break
            log10i = np.log10(np.where(it>0, it, tiny))
            for j in range(1, len(log10i)):
                if log10i[j] - log10i[j-1] < -50:
                    it[j:] = 0.0; break
            it[E_ref >= Ecut] = 0.0

        tot = d + s + it + f
        tol = 1e-299
        for arr in (d, s, it, f, tot): arr[arr < tol] = 0.0

        plt.figure(figsize=(10,7))
        if np.any(d>0): plt.plot(E_ref[d>0], d[d>0], label="Direct Hawking", lw=2)
        if np.any(s>0): plt.plot(E_ref[s>0], s[s>0], label="Secondary", lw=2, linestyle='--')
        if np.any(it>0): plt.plot(E_ref[it>0], it[it>0], label="Inflight", lw=2)
        if np.any(f>0): plt.plot(E_ref[f>0], f[f>0], label="Final State", lw=2)
        if np.any(tot>0): plt.plot(E_ref[tot>0], tot[tot>0], 'k.', label="Total Spectrum")
        plt.xlabel(r'$E_\gamma$ (MeV)')
        plt.ylabel(r'$dN_\gamma/dE_\gamma$ (MeV$^{-1}$ s$^{-1}$)')
        plt.xscale('log'); plt.yscale('log')
        peak_total = tot.max() if tot.size else 1e-20
        plt.ylim(peak_total/1e3, peak_total*1e1)
        plt.xlim(0.5, 5000.0)
        plt.grid(True, which='both', linestyle='--')
        plt.legend()
        plt.title(f'Components for {mval:.2e} g ({kind})')
        plt.tight_layout()
        plt.show()
        plt.close()

        all_data.append({'mass': mval, 'kind': kind, 'E': E_ref.copy(),
                         'direct': d.copy(), 'secondary': s.copy(),
                         'inflight': it.copy(), 'finalstate': f.copy(),
                         'total': tot.copy()})

    if all_data:
        fig = plt.figure(figsize=(10,7))
        summed = np.zeros_like(all_data[0]['E']); peaks = []
        for entry in all_data:
            Ecur = entry['E']; tot = entry['total']; valid = tot>0
            if np.any(valid):
                plt.plot(Ecur[valid], Ecur[valid]**2 * tot[valid], lw=2,
                         label=f"{entry['mass']:.2e} g ({entry['kind']})")
                summed += tot
                peaks.append((Ecur[valid]**2 * tot[valid]).max())
        vs = summed>0
        plt.plot(all_data[0]['E'][vs], all_data[0]['E'][vs]**2 * summed[vs],
                 'k:', lw=3, label="Summed")
        ymax_o = max(peaks) * 1e1; ymin_o = ymax_o / 1e3
        plt.xlabel(r'$E_\gamma$ (MeV)'); plt.ylabel(r'$E^2 dN_\gamma/dE_\gamma$ (MeV s$^{-1}$)')
        plt.xscale('log'); plt.yscale('log'); plt.xlim(0.5, 5000.0); plt.ylim(ymin_o, ymax_o)
        plt.grid(True, which='both', linestyle='--'); plt.legend()
        plt.title('Total Hawking Radiation Spectra (E²·dN/dE)'); plt.tight_layout()
        plt.show()
        plt.close(fig)

        sv = user_input("Save any spectra? (y/n): ", allow_back=False, allow_exit=True).strip().lower()
        if sv in ['y', 'yes']:
            print("Select spectra by index to save (single file each):")
            for idx, e in enumerate(all_data, start=1):
                print(f" {idx}: {e['mass']:.2e} g ({e['kind']})")
            choice = user_input("Enter comma-separated indices (e.g. 1,3,5) or '0' to save ALL: ",
                                allow_back=False, allow_exit=True).strip().lower()
            if choice == '0':
                picks = list(range(1, len(all_data)+1))
            else:
                try:
                    picks = [int(x) for x in choice.split(',')]
                except ValueError:
                    err("Invalid indices; skipping save."); picks = []
            for i in picks:
                if 1 <= i <= len(all_data):
                    e = all_data[i - 1]
                    mass_label = f"{e['mass']:.2e}"
                    filename = os.path.join(MONO_RESULTS_DIR, f"{mass_label}_spectrum.txt")
                    data_cols = np.column_stack((e['E'], e['direct'], e['secondary'], e['inflight'], e['finalstate'], e['total']))
                    header = "E_gamma(MeV) Direct Secondary Inflight FinalState Total (MeV^-1 s^-1)"
                    np.savetxt(filename, data_cols, header=header, fmt="%e")
                    print(f"Saved → {filename}")
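
# Illustrative sketch of the interpolation scheme used above: tabulated spectra are logged
# and fit with RectBivariateSpline, linear across log-mass and cubic across log-energy,
# then exponentiated at the requested mass. All numbers here are toy values.
def _demo_loglog_spline():
    logM = np.log(np.array([1e15, 2e15, 4e15]))            # toy tabulated masses (g)
    logE_demo = np.log(np.logspace(0, 3, 200))             # toy energy grid (MeV)
    table = np.random.rand(3, 200) + 1e-3                  # stand-in for a tabulated dN/dE component
    spl = RectBivariateSpline(logM, logE_demo, np.log(table), kx=1, ky=3, s=0)
    return np.exp(spl(np.log(3e15), logE_demo, grid=False))  # spectrum at an intermediate mass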

# ---------------------------
# (Kept for reference; not used now) Right-edge spike trimming helper
# ---------------------------
def _trim_right_spike(x_line, y_line, up_thresh=1.35, down_thresh=0.35, max_trim_frac=0.10):
    y = np.asarray(y_line, dtype=float)
    n = y.size
    if n < 3:
        return n - 1
    y_nm1, y_nm2 = y[-1], y[-2]
    if not (np.isfinite(y_nm1) and np.isfinite(y_nm2)) or y_nm2 == 0:
        return n - 1
    ratio = y_nm1 / max(y_nm2, 1e-300)
    if (ratio <= up_thresh) and (ratio >= down_thresh):
        return n - 1
    max_trim = max(3, int(max_trim_frac * n))
    j = n - 1
    trimmed = 0
    if ratio > up_thresh:
        while (j > 1 and trimmed < max_trim and np.isfinite(y[j]) and np.isfinite(y[j-1]) and
               (y[j] / max(y[j-1], 1e-300) > up_thresh)):
            j -= 1; trimmed += 1
        return max(j, 2)
    while (j > 1 and trimmed < max_trim and np.isfinite(y[j]) and np.isfinite(y[j-1]) and
           (y[j] / max(y[j-1], 1e-300) < down_thresh)):
        j -= 1; trimmed += 1
    return max(j, 2)

# ---------------------------
# Distributed (Gaussian collapse / Non-Gaussian / Lognormal)
# ---------------------------
def distributed_spectrum(distribution_method):
    """
    LOGNORMAL_METHOD: unbounded mass sampling (mode-centered).
    NON_GAUSSIAN: enforce 0.04 ≤ σ_X ≤ 0.16 (with σ_Y/σ_X = 0.75).

    At any prompt:
      - enter 'b' or 'back' to return to the main menu
      - enter 'q' or 'exit' to quit the program
    """
    is_g = (distribution_method == GAUSSIAN_METHOD)
    is_ng = (distribution_method == NON_GAUSSIAN_METHOD)
    is_ln = (distribution_method == LOGNORMAL_METHOD)

    masses, names = discover_mass_folders(DATA_DIR)
    if not masses:
        warn(f"No valid mass folders found under: {DATA_DIR}")
        return
    MIN_MASS, MAX_MASS = min(masses), max(masses)

    try:
        pstr = user_input(
            f"Enter peak PBH masses (g) (comma-separated; each must be within [{MIN_MASS:.2e}, {MAX_MASS:.2e}]): ",
            allow_back=True, allow_exit=True
        )
    except BackRequested:
        return

    peaks = parse_float_list_verbose(pstr, name="peak mass (g)", bounds=(MIN_MASS, MAX_MASS), allow_empty=False)
    if not peaks:
        warn("No valid peaks; returning."); return

    try:
        nstr = user_input("Enter target N (integer, e.g. 1000): ",
                          allow_back=True, allow_exit=True)
    except BackRequested:
        return

    try:
        N_target = int(nstr)
        if N_target <= 0:
            err("N must be > 0. Returning.")
            return
    except Exception:
        err("Invalid N (not an integer). Returning.")
        return

    # collapse parameters
    kappa, gamma_p, delta_c = 3.3, 0.36, 0.59

    # read parameter lists
    param_sets = []
    if is_g:
        try:
            sstr = user_input("Enter σ list for Gaussian collapse (comma-separated; each must be within [0.03, 0.255]): ",
                              allow_back=True, allow_exit=True).strip()
        except BackRequested:
            return
        sigmas = parse_float_list_verbose(sstr, name="σ", bounds=(0.03, 0.255), allow_empty=False)
        if not sigmas:
            warn("No valid σ for Gaussian; returning.")
            return
        for sx in sigmas:
            param_sets.append({"sigma_x": sx})

    elif is_ng:
        try:
            sx_str = user_input("Enter σ_X list for Non-Gaussian collapse (comma-separated; σ must be within [0.04, 0.16]): ",
                                allow_back=True, allow_exit=True).strip()
        except BackRequested:
            return
        sigmas_X = parse_float_list_verbose(sx_str, name="σ_X", bounds=(0.04, 0.16), allow_empty=False)
        if not sigmas_X:
            warn("No valid σ for Non-Gaussian; returning.")
            return
        for sX in sigmas_X:
            param_sets.append({"sigma_X": sX, "ratio": 0.75})

    else:  # is_ln
        try:
            sig_str = user_input("Enter σ list (log-space std) for Log-Normal (comma-separated; each > 0): ",
                                 allow_back=True, allow_exit=True).strip()
        except BackRequested:
            return
        sigmas_ln = parse_float_list_verbose(sig_str, name="σ", bounds=(1e-12, None), allow_empty=False, strict_gt=True)
        if not sigmas_ln:
            warn("No valid σ for Log-Normal; returning.")
            return
        for sln in sigmas_ln:
            param_sets.append({"sigma_ln": sln})

    # pre-load all component matrices on a shared energy grid
    first = load_spectra_components(os.path.join(DATA_DIR, names[0]))
    E_grid = first['energy_primary']
    logE = np.log(E_grid)
    N_M = len(masses)

    direct_mat = np.zeros((N_M, len(E_grid)))
    secondary_mat = np.zeros_like(direct_mat)
    inflight_mat = np.zeros_like(direct_mat)
    final_mat = np.zeros_like(direct_mat)
    Emax_ifa = np.zeros(N_M)

    for i, m in enumerate(masses):
        sub = os.path.join(DATA_DIR, names[i])
        S = load_spectra_components(sub)
        direct_mat[i] = S['direct_gamma_primary']
        secondary_mat[i] = np.interp(E_grid, S['energy_secondary'], S['direct_gamma_secondary'], left=0, right=0)
        inflight_mat[i] = S['IFA_primary'] + np.interp(E_grid, S['energy_secondary'], S['IFA_secondary'], left=0, right=0)
        final_mat[i] = S['FSR_primary'] + np.interp(E_grid, S['energy_secondary'], S['FSR_secondary'], left=0, right=0)

        p = np.genfromtxt(os.path.join(sub, "inflight_annihilation_prim.txt"))
        s = np.genfromtxt(os.path.join(sub, "inflight_annihilation_sec.txt"))
        Emax_ifa[i] = max(p[:,0].max() if p.size else 0, s[:,0].max() if s.size else 0)

    logM_all = np.log(masses)
    floor = 1e-300

    ld = np.log(np.where(direct_mat > floor, direct_mat, floor))
    ls = np.log(np.where(secondary_mat > floor, secondary_mat, floor))
    li = np.log(np.where(inflight_mat > floor, inflight_mat, floor))
    lf = np.log(np.where(final_mat > floor, final_mat, floor))

    sp_d = RectBivariateSpline(logM_all, logE, ld, kx=1, ky=3, s=0)
    sp_s = RectBivariateSpline(logM_all, logE, ls, kx=1, ky=3, s=0)
    sp_i = RectBivariateSpline(logM_all, logE, li, kx=1, ky=3, s=0)
    sp_f = RectBivariateSpline(logM_all, logE, lf, kx=1, ky=3, s=0)

    results = []

    for params in param_sets:

        if is_g:
            sigma_x = params["sigma_x"]
            x = np.linspace(0.001, 1.30909, 2000)
            mf = mass_function(delta_l(x, 3.3, 0.59, 0.36), sigma_x, 0.59, 0.36)
            label_param = f"σ={sigma_x:.3g}"
            mf = np.where(np.isfinite(mf) & (mf > 0), mf, 0.0)
            if mf.sum() <= 0:
                warn(f"Underlying PDF vanished for σ={sigma_x:g}; skipping.")
                continue
            probabilities = mf / mf.sum()
            r_mode = x[np.argmax(mf)] if np.any(mf) else x[len(x)//2]

        elif is_ng:
            sigma_X = params["sigma_X"]; ratio = params["ratio"]; sigma_Y = ratio * sigma_X
            x = np.linspace(0.001, 1.30909, 2000)
            mf = mass_function_exact(delta_l(x, 3.3, 0.59, 0.36), sigma_X, sigma_Y, 0.59, 0.36)
            label_param = f"σX={sigma_X:.3g}"
            mf = np.where(np.isfinite(mf) & (mf > 0), mf, 0.0)
            if mf.sum() <= 0:
                warn(f"Underlying PDF vanished for σ_X={sigma_X:g}; skipping.")
                continue
            probabilities = mf / mf.sum()
            r_mode = x[np.argmax(mf)] if np.any(mf) else x[len(x)//2]

        else:  # is_ln
            sigma_ln = params["sigma_ln"]
            label_param = f"σ={sigma_ln:.3g}"

        for peak in peaks:
            sum_d = np.zeros_like(E_grid); sum_s = np.zeros_like(E_grid)
            sum_i = np.zeros_like(E_grid); sum_f = np.zeros_like(E_grid)
            md = []

            bar = tqdm(total=N_target, desc=f"Sampling peak {peak:.2e} [{label_param}]", unit="BH")

            if is_ln:
                mu_eff = np.log(peak) + sigma_ln**2
                try:
                    masses_drawn = np.random.lognormal(mean=mu_eff, sigma=sigma_ln, size=N_target)
                except Exception as e:
                    err(f"Sampling error (lognormal, peak {peak:.3e}, σ={sigma_ln:g}): {e}. Skipping.")
                    bar.close()
                    continue
                for mraw in masses_drawn:
                    md.append(float(mraw))
                    if mraw < MIN_MASS or mraw > MAX_MASS:
                        d_vals = s_vals = i_vals = f_vals = np.zeros_like(E_grid)
                    else:
                        try:
                            snap = snap_to_available(mraw, masses)
                            mval = snap if snap else mraw
                            idx_up = int(np.searchsorted(masses, mval, side='left'))
                            idx_low = max(0, idx_up-1)
                            idx_up = min(idx_up, N_M-1)
                            Ecut = min(Emax_ifa[idx_low], Emax_ifa[idx_up])
                            logm = np.log(mval)
                            d_vals = np.exp(sp_d(logm, logE, grid=False))
                            s_vals = np.exp(sp_s(logm, logE, grid=False))
                            i_vals = np.exp(sp_i(logm, logE, grid=False))
                            f_vals = np.exp(sp_f(logm, logE, grid=False))
                        except Exception as e:
                            warn(f"Interpolation error at mass {mraw:.3e} g: {e}. Skipping draw.")
                            d_vals = s_vals = i_vals = f_vals = np.zeros_like(E_grid)
                        # guard inflight tails
                        for j in range(len(i_vals)-1,0,-1):
                            if np.isclose(i_vals[j], i_vals[j-1], rtol=1e-8): i_vals[j] = 0.0
                            else: break
                        log10i = np.log10(np.where(i_vals>0, i_vals, floor))
                        for j in range(1,len(log10i)):
                            if log10i[j] - log10i[j-1] < -50:
                                i_vals[j:] = 0.0; break
                        i_vals[E_grid >= Ecut] = 0.0
                    sum_d += d_vals; sum_s += s_vals; sum_i += i_vals; sum_f += f_vals
                    bar.update(1)

            else:
                scale = peak / r_mode
                for _ in range(N_target):
                    r = np.random.choice(x, p=probabilities)
                    mraw = r * scale
                    md.append(mraw)
                    if mraw < MIN_MASS or mraw > MAX_MASS:
                        d_vals = s_vals = i_vals = f_vals = np.zeros_like(E_grid)
                    else:
                        try:
                            snap = snap_to_available(mraw, masses)
                            mval = snap if snap else mraw
                            idx_up = int(np.searchsorted(masses, mval, side='left'))
                            idx_low = max(0, idx_up-1)
                            idx_up = min(idx_up, N_M-1)
                            Ecut = min(Emax_ifa[idx_low], Emax_ifa[idx_up])
                            logm = np.log(mval)
                            d_vals = np.exp(sp_d(logm, logE, grid=False))
                            s_vals = np.exp(sp_s(logm, logE, grid=False))
                            i_vals = np.exp(sp_i(logm, logE, grid=False))
                            f_vals = np.exp(sp_f(logm, logE, grid=False))
                        except Exception as e:
                            warn(f"Interpolation error at mass {mraw:.3e} g: {e}. Skipping draw.")
                            d_vals = s_vals = i_vals = f_vals = np.zeros_like(E_grid)
                        # guard inflight tails
                        for j in range(len(i_vals)-1,0,-1):
                            if np.isclose(i_vals[j], i_vals[j-1], rtol=1e-8): i_vals[j] = 0.0
                            else: break
                        log10i = np.log10(np.where(i_vals>0, i_vals, floor))
                        for j in range(1,len(log10i)):
                            if log10i[j] - log10i[j-1] < -50:
                                i_vals[j:] = 0.0; break
                        i_vals[E_grid >= Ecut] = 0.0
                    sum_d += d_vals; sum_s += s_vals; sum_i += i_vals; sum_f += f_vals
                    bar.update(1)

            bar.close()

            avg_d = sum_d / N_target; avg_s = sum_s / N_target
            avg_i = sum_i / N_target; avg_f = sum_f / N_target
            avg_tot = avg_d + avg_s + avg_i + avg_f
            tol = 1e-299
            for arr in (avg_d, avg_s, avg_i, avg_f, avg_tot): arr[arr < tol] = 0.0

            results.append({
                "method": ("gaussian" if is_g else "non_gaussian" if is_ng else "lognormal"),
                "peak": peak,
                "params": params.copy(),
                "E": E_grid.copy(),
                "spectrum": avg_tot.copy(),
                "mdist": md[:],
                "label_param": label_param,
                "nsamp": N_target
            })

    if not results:
        return

    # dN/dE overlays
    fig1 = plt.figure(figsize=(10,7))
    peaks_dn = []
    for r in results:
        E = r["E"]; sp = r["spectrum"]; m = sp > 0
        plt.plot(E[m], sp[m], lw=2,
                 label=f"{distribution_method} {r['peak']:.1e}_{r['label_param'].replace('σ=','').replace('σX=','')}")
        peaks_dn.append(sp.max())
    plt.xscale('log'); plt.yscale('log'); plt.xlabel(r'$E_\gamma$ (MeV)'); plt.ylabel(r'$dN_\gamma/dE_\gamma$')
    if peaks_dn: plt.ylim(min(peaks_dn)/1e3, max(peaks_dn)*10)
    plt.xlim(0.5, 5e3); plt.grid(True, which='both', linestyle='--'); plt.legend()
    plt.title("Comparison: dN/dE"); plt.tight_layout(); plt.show(); plt.close(fig1)

    # E^2 dN/dE overlays
    fig2 = plt.figure(figsize=(10,7))
    peaks_e2 = []
    for r in results:
        E = r["E"]; sp = r["spectrum"]; m = sp > 0
        plt.plot(E[m], E[m]**2 * sp[m], lw=2,
                 label=f"{distribution_method} {r['peak']:.1e}_{r['label_param'].replace('σ=','').replace('σX=','')}")
        peaks_e2.append((E[m]**2 * sp[m]).max() if np.any(m) else 0.0)
    plt.xscale('log'); plt.yscale('log'); plt.xlabel(r'$E_\gamma$ (MeV)'); plt.ylabel(r'$E^2\,dN_\gamma/dE_\gamma$')
    if peaks_e2: plt.ylim(min(peaks_e2)/1e3, max(peaks_e2)*10)
    plt.xlim(0.5, 5e3); plt.grid(True, which='both', linestyle='--'); plt.legend()
    plt.title("Comparison: $E^2$ dN/dE"); plt.tight_layout(); plt.show(); plt.close(fig2)

    # Histograms + theoretical mass-PDF overlays (in "counts" space)
    for r in results:
        method = r["method"]
        figH = plt.figure(figsize=(10,6))

        md = np.asarray(r["mdist"], dtype=float)
        md = md[np.isfinite(md)]

        if md.size < 2 or (md.size > 0 and md.min() == md.max()):
            center = md[0] if md.size else 0.0
            eps = abs(center)*1e-9 if center != 0 else 1e-9
            _, bins, _ = plt.hist(
                md, bins=1, range=(center - eps, center + eps),
                alpha=0.7, edgecolor='k',
                label=f'{distribution_method} samples ({r["label_param"]})'
            )
        else:
            q25, q75 = np.percentile(md, [25, 75])
            iqr = q75 - q25
            if iqr > 0:
                bw = 2 * iqr * md.size ** (-1/3)  # Freedman–Diaconis
                k = int(np.clip(np.ceil((md.max() - md.min()) / bw), 1, 50))
            else:
                k = int(np.clip(np.sqrt(md.size), 1, 50))
            _, bins, _ = plt.hist(
                md, bins=k, alpha=0.7, edgecolor='k',
                label=f'{distribution_method} samples ({r["label_param"]})'
            )

        bin_widths = (bins[1:] - bins[:-1])
        ref_width = float(np.median(bin_widths)) if bin_widths.size else 1.0

        if method == "gaussian":
            sigma_x = r["params"]["sigma_x"]
            x = np.linspace(0.001, 1.30909, 2000)
            mf = mass_function(delta_l(x, 3.3, 0.59, 0.36), sigma_x, 0.59, 0.36)
            mf = np.where(np.isfinite(mf) & (mf > 0), mf, 0.0)
            if mf.sum() > 0:
                probabilities = mf / mf.sum()
                r_mode = x[np.argmax(mf)] if np.any(mf) else x[len(x)//2]
                scale = r["peak"] / r_mode
                dx = x[1] - x[0]; dm = dx * scale
                pdf_mass = probabilities / dm  # per gram
                m_line = x * scale
                mask = (m_line >= bins[0]) & (m_line <= bins[-1]) & np.isfinite(pdf_mass) & (pdf_mass > 0)
                y_line = pdf_mass[mask] * ref_width * len(r["mdist"]) if np.any(mask) else []
                if np.any(mask):
                    plt.plot(m_line[mask], y_line, 'r--', lw=2, zorder=3, label='Underlying PDF (counts)')

        elif method == "non_gaussian":
            sigma_X = r["params"]["sigma_X"]; ratio = 0.75; sigma_Y = ratio * sigma_X
            x = np.linspace(0.001, 1.30909, 2000)
            mf = mass_function_exact(delta_l(x, 3.3, 0.59, 0.36), sigma_X, sigma_Y, 0.59, 0.36)
            mf = np.where(np.isfinite(mf) & (mf > 0), mf, 0.0)
            if mf.sum() > 0:
                probabilities = mf / mf.sum()
                r_mode = x[np.argmax(mf)] if np.any(mf) else x[len(x)//2]
                scale = r["peak"] / r_mode
                dx = x[1] - x[0]; dm = dx * scale
                pdf_mass = probabilities / dm
                m_line = x * scale
                mask = (m_line >= bins[0]) & (m_line <= bins[-1]) & np.isfinite(pdf_mass) & (pdf_mass > 0)
                y_line = pdf_mass[mask] * ref_width * len(r["mdist"]) if np.any(mask) else []
                if np.any(mask):
                    plt.plot(m_line[mask], y_line, 'r--', lw=2, zorder=3, label='Underlying PDF (counts)')

        else:
            sigma_ln = r["params"]["sigma_ln"]; mu_eff = np.log(r["peak"]) + sigma_ln**2
            mlo_tail = np.exp(mu_eff - 6.0*sigma_ln); mhi_tail = np.exp(mu_eff + 6.0*sigma_ln)
            m_plot = np.logspace(np.log10(min(bins[0], mlo_tail)), np.log10(max(bins[-1], mhi_tail)), 2000)
            pdf = (1.0/(m_plot*sigma_ln*np.sqrt(2*np.pi))) * np.exp( - (np.log(m_plot)-mu_eff)**2 / (2*sigma_ln**2) )
            y_plot = pdf * ref_width * len(r["mdist"])
            plt.plot(m_plot, y_plot, 'r--', lw=2, zorder=3, label='Underlying PDF (counts)')
            plt.legend(title=f"σ={sigma_ln:.3f}")

        plt.xlabel('Simulated PBH Mass (g)')
        plt.ylabel('Count')
        plt.title(f'Mass Distribution & PDF for Peak {r["peak"]:.2e} g')
        plt.grid(True, which='both', linestyle='--')
        plt.legend()
        plt.tight_layout()
        plt.show(); plt.close(figH)

    # === Save distributed results ===
    try:
        tosave = user_input("Save distributed results? (y/n): ",
                            allow_back=True, allow_exit=True).strip().lower()
    except BackRequested:
        tosave = 'n'
    if tosave in ('y', 'yes'):
        for r in results:
            method = r["method"]
            if method == "gaussian":
                base = GAUSS_RESULTS_DIR
                tag = f"peak_{r['peak']:.2e}_{r['label_param'].replace('=','')}_N{r['nsamp']}"
            elif method == "non_gaussian":
                base = NGAUSS_RESULTS_DIR
                tag = f"peak_{r['peak']:.2e}_{r['label_param'].replace('=','')}_N{r['nsamp']}"
            else:
                base = LOGN_RESULTS_DIR
                tag = f"peak_{r['peak']:.2e}_{r['label_param'].replace('=','')}_N{r['nsamp']}"
            outdir = os.path.join(base, tag)
            k = 1; unique = outdir
            while os.path.exists(unique):
                unique = f"{outdir}_{k}"; k += 1
            os.makedirs(unique, exist_ok=True)
            np.savetxt(os.path.join(unique, "distributed_spectrum.txt"),
                       np.column_stack((r["E"], r["spectrum"])),
                       header="E_gamma(MeV) TotalSpectrum", fmt="%.10e")
            np.savetxt(os.path.join(unique, "mass_distribution.txt"),
                       np.asarray(r["mdist"], dtype=float),
                       header="Sampled masses (g)", fmt="%.12e")
            print(f"Saved → {unique}")

# ---------------------------
# Helpers for Custom Equation: safe eval + variable prompting
# ---------------------------
def _build_safe_numpy_namespace():
    # Restrict to common math; expose as 'np' and 'numpy'
    safe_np = SimpleNamespace(
        log=np.log, log10=np.log10, log1p=np.log1p, exp=np.exp, sqrt=np.sqrt, power=np.power,
        sin=np.sin, cos=np.cos, tan=np.tan, arctan=np.arctan,
        abs=np.abs, minimum=np.minimum, maximum=np.maximum, clip=np.clip, erf=erf,
        pi=np.pi, e=np.e
    )
    return safe_np

SAFE_FUNCS = {
    "log","log10","log1p","exp","sqrt","pow","sin","cos","tan","arctan",
    "abs","minimum","maximum","clip","erf","pi","e","m","np","numpy"
}

def _detect_custom_variables(expr):
    """
    Return a sorted list of identifiers in expr that are NOT in SAFE_FUNCS.
    Identifiers start with a letter/underscore (Unicode aware), so '1e-3' won't
    yield a spurious 'e'. Greek letters like 'μ','α','β' are supported.
    """
    expr_wo_strings = re.sub(r"(\".*?\"|'.*?')", "", expr)
    tokens = set(re.findall(r"\b[^\W\d]\w*\b", expr_wo_strings, flags=re.UNICODE))
    unknown = sorted([t for t in tokens if t not in SAFE_FUNCS])
    return unknown

def _prompt_variable_values(var_names):
    """
    Ask user for each variable; values can be numeric or expressions using pi, e, and np.*
    Returns dict {var: float_value}
    """
    vals = {}
    safe_np = _build_safe_numpy_namespace()
    num_ctx = {"__builtins__": None, "pi": np.pi, "e": np.e, "np": safe_np, "numpy": safe_np}
    for name in var_names:
        while True:
            try:
                s = user_input(f"Enter value for variable '{name}': ",
                               allow_back=True, allow_exit=True).strip()
                # allow expressions like 2*np.pi, 1e16, etc.
                val = eval(s, {"__builtins__": None}, num_ctx)
                val = float(val)
                vals[name] = val
                break
            except BackRequested:
                raise
            except SystemExit:
                raise
            except Exception:
                err("Could not parse value. Use a number or an expression like '1e16' or '2*np.pi'. Try again.")
    return vals
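
# Illustrative sketch: detecting the free variables of a user expression and evaluating it
# in the same restricted-namespace style used by safe_eval_on_grid below. The expression
# and the values chosen for 'mp' and 'alpha' are arbitrary examples.
def _demo_custom_expression():
    expr = "(m / mp)**(-alpha) / m"
    assert _detect_custom_variables(expr) == ['alpha', 'mp']
    ctx = {"m": np.logspace(14, 18, 5), "alpha": 2.0, "mp": 1e16,
           "np": _build_safe_numpy_namespace()}
    return eval(expr, {"__builtins__": None}, ctx)          # restricted eval, no builtins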

# ---------------------------
# Custom Mass PDF from user-entered EQUATION (RHS only; no 'fm=' needed)
# ---------------------------
def custom_equation_pdf_tool():
    """
    Build a PBH mass PDF from a user-entered equation f(m, params...) given as a RIGHT-HAND SIDE expression in m,
    normalize it (per gram), prompt for any custom variables, sample N masses, accumulate ONLY the TOTAL spectrum,
    then show:
      (1) total dN/dE,
      (2) total E^2 dN/dE,
      (3) mass histogram (counts) with analytic PDF scaled to counts (log bins).

    Saves (if requested): equation.txt, samples_sorted.txt, distributed_spectrum.txt
    """
    # Discover data domain
    masses, names = discover_mass_folders(DATA_DIR)
    if masses:
        M_MIN, M_MAX = min(masses), max(masses)
    else:
        M_MIN, M_MAX = 5e13, 1e19

    N_BINS = 50

    def log_edges(a, b, k):
        return np.logspace(np.log10(a), np.log10(b), k + 1)

    def safe_eval_on_grid(expr, m_grid, user_vars):
        safe_np = _build_safe_numpy_namespace()
        safe = {
            "m": m_grid,
            "log": np.log, "log10": np.log10, "log1p": np.log1p,
            "exp": np.exp, "sqrt": np.sqrt, "pow": np.power,
            "sin": np.sin, "cos": np.cos, "tan": np.tan, "arctan": np.arctan,
            "abs": np.abs, "minimum": np.minimum, "maximum": np.maximum, "clip": np.clip,
            "erf": erf, "pi": np.pi, "e": np.e,
            "np": safe_np, "numpy": safe_np
        }
        safe.update(user_vars)
        try:
            y = eval(expr, {"__builtins__": None}, safe)
        except BackRequested:
            raise
        except SystemExit:
            raise
        except Exception as e:
            raise ValueError(f"Could not evaluate expression: {e}")
        y = np.asarray(y, dtype=float)
        if y.size == 1:
            y = np.full_like(m_grid, float(y))
        if y.shape != m_grid.shape:
            raise ValueError("Expression did not return an array of the same shape as m.")
        return y

    def cdf_from_pdf(m, pdf):
        cdf = np.empty_like(pdf)
        cdf[0] = 0.0
        dm = np.diff(m)
        cdf[1:] = np.cumsum(0.5 * (pdf[1:] + pdf[:-1]) * dm)
        total = cdf[-1]
        if not np.isfinite(total) or total <= 0:
            raise ValueError("PDF integrates to non-positive value.")
        cdf /= total
        return cdf

    # ---- read the equation & prompt variables ----
    print("\n=== Custom Equation Mass PDF ===")
    print("Domain: m in [{:.2e}, {:.2e}] g".format(M_MIN, M_MAX))
    print("Enter a Python expression for your PDF f(m) using 'm' in grams and any constants/variables you define.")
    print("Examples:")
    print("f(m) = (m/mp)**(-(alpha0 + beta*log(m/mp))) / m")
    print("f(m) = exp(-m/5e17) / m")
    try:
        expr = user_input("f(m) = ", allow_back=True, allow_exit=True).strip()
    except BackRequested:
        return

    # If someone pastes "f(m) = ..." or "fm = ...", strip the prefix anyway.
    expr = re.sub(r'^\s*(?:f\s*\(\s*m\s*\)|fm)\s*=\s*', '', expr, flags=re.IGNORECASE)

    # Detect custom variables (excluding allowed function names, m, pi, e, np, numpy)
    vars_needed = _detect_custom_variables(expr)
    user_vars = {}
    if vars_needed:
        info(f"Variables detected: {', '.join(vars_needed)}")
        try:
            user_vars = _prompt_variable_values(vars_needed)
        except BackRequested:
            return

    # ---- build normalized PDF on a fine m-grid ----
    m_grid = np.logspace(np.log10(M_MIN), np.log10(M_MAX), 20000)
    try:
        f = safe_eval_on_grid(expr, m_grid, user_vars)
    except BackRequested:
        return
    except ValueError as e:
        err(str(e))
        return

    f = np.clip(f, 0.0, None)
    area = trapezoid(f, m_grid)
    if not np.isfinite(area) or area <= 0.0:
        err("Your f(m) is nonpositive or non-integrable over the domain.")
        return
    pdf = f / area  # per gram
    cdf = cdf_from_pdf(m_grid, pdf)

    # ---- ask for N ----
    try:
        n_default = 1000
        n_str = user_input(f"Enter target N (integer, e.g. 1000): ",
                           allow_back=True, allow_exit=True).strip()
        if n_str == "":
            N = n_default
        else:
            N = int(n_str)
        if N <= 0:
            err("N must be > 0.")
            return
    except BackRequested:
        return
    except Exception:
        err("Invalid N (must be a positive integer).")
        return

    # ---- pre-load spectral grids & splines ----
    if not masses:
        err("No valid mass folders found under the data directory.")
        return

    first = load_spectra_components(os.path.join(DATA_DIR, names[0]))
    E_grid = first['energy_primary']
    logE = np.log(E_grid)
    N_M = len(masses)

    direct_mat = np.zeros((N_M, len(E_grid)))
    secondary_mat = np.zeros_like(direct_mat)
    inflight_mat = np.zeros_like(direct_mat)
    final_mat = np.zeros_like(direct_mat)
    Emax_ifa = np.zeros(N_M)

    for i, m in enumerate(masses):
        sub = os.path.join(DATA_DIR, names[i])
        S = load_spectra_components(sub)
        direct_mat[i] = S['direct_gamma_primary']
        secondary_mat[i] = np.interp(E_grid, S['energy_secondary'], S['direct_gamma_secondary'], left=0, right=0)
        inflight_mat[i] = S['IFA_primary'] + np.interp(E_grid, S['energy_secondary'], S['IFA_secondary'], left=0, right=0)
        final_mat[i] = S['FSR_primary'] + np.interp(E_grid, S['energy_secondary'], S['FSR_secondary'], left=0, right=0)

        p = np.genfromtxt(os.path.join(sub, "inflight_annihilation_prim.txt"))
        s = np.genfromtxt(os.path.join(sub, "inflight_annihilation_sec.txt"))
        Emax_ifa[i] = max(p[:,0].max() if p.size else 0, s[:,0].max() if s.size else 0)

    logM_all = np.log(masses)
    floor = 1e-300
    ld = np.log(np.where(direct_mat > floor, direct_mat, floor))
    ls = np.log(np.where(secondary_mat > floor, secondary_mat, floor))
    li = np.log(np.where(inflight_mat > floor, inflight_mat, floor))
    lf = np.log(np.where(final_mat > floor, final_mat, floor))

    sp_d = RectBivariateSpline(logM_all, logE, ld, kx=1, ky=3, s=0)
    sp_s = RectBivariateSpline(logM_all, logE, ls, kx=1, ky=3, s=0)
    sp_i = RectBivariateSpline(logM_all, logE, li, kx=1, ky=3, s=0)
    sp_f = RectBivariateSpline(logM_all, logE, lf, kx=1, ky=3, s=0)

    # ---- sample masses via inverse CDF and accumulate ONLY total spectrum ----
    rng = np.random.default_rng()
    u = rng.random(N)
    samples = np.interp(u, cdf, m_grid)
    samples.sort()

    sum_tot = np.zeros_like(E_grid)

    bar = tqdm(total=N, desc="Sampling custom PDF", unit="BH")
    for mraw in samples:
        if mraw < masses[0] or mraw > masses[-1]:
            bar.update(1)
            continue
        try:
            snap = snap_to_available(mraw, masses)
            mval = snap if snap else mraw
            idx_up = int(np.searchsorted(masses, mval, side='left'))
            idx_low = max(0, idx_up-1)
            idx_up = min(idx_up, len(masses)-1)
            Ecut = min(Emax_ifa[idx_low], Emax_ifa[idx_up])
            logm = np.log(mval)
            d_vals = np.exp(sp_d(logm, logE, grid=False))
            s_vals = np.exp(sp_s(logm, logE, grid=False))
            i_vals = np.exp(sp_i(logm, logE, grid=False))
            f_vals = np.exp(sp_f(logm, logE, grid=False))
        except Exception:
            bar.update(1)
            continue

        # trim inflight tails (stability)
        for j in range(len(i_vals)-1, 0, -1):
            if np.isclose(i_vals[j], i_vals[j-1], rtol=1e-8):
                i_vals[j] = 0.0
            else:
                break
        log10i = np.log10(np.where(i_vals > 0, i_vals, floor))
        for j in range(1, len(log10i)):
            if log10i[j] - log10i[j-1] < -50:
                i_vals[j:] = 0.0
                break
        i_vals[E_grid >= Ecut] = 0.0

        sum_tot += (d_vals + s_vals + i_vals + f_vals)
        bar.update(1)
    bar.close()

    avg_tot = sum_tot / max(N, 1)
    avg_tot[avg_tot < 1e-299] = 0.0

    # ---- FIGURE A: total dN/dE ----
    msk = avg_tot > 0
    plt.figure(figsize=(10, 7))
    plt.plot(E_grid[msk], avg_tot[msk], lw=2, label="Total spectrum")
    plt.xscale('log'); plt.yscale('log')
    plt.xlim(0.5, 5e3)
    if np.any(msk):
        peak = avg_tot[msk].max()
        plt.ylim(peak/1e3, peak*10)
    plt.xlabel(r'$E_\gamma$ (MeV)')
    plt.ylabel(r'$dN_\gamma/dE_\gamma$ (MeV$^{-1}$ s$^{-1}$)')
    plt.grid(True, which='both', linestyle='--')
    plt.legend()
    plt.title("Custom Equation — Total $dN/dE$")
    plt.tight_layout()
    plt.show()

    # ---- FIGURE B: total E^2 dN/dE ----
    plt.figure(figsize=(10, 7))
    if np.any(msk):
        plt.plot(E_grid[msk], (E_grid[msk]**2) * avg_tot[msk], lw=2, label="Total")
        peak_e2 = ((E_grid[msk]**2) * avg_tot[msk]).max()
        plt.ylim(peak_e2/1e3, peak_e2*10)
    plt.xscale('log'); plt.yscale('log')
    plt.xlim(0.5, 5e3)
    plt.xlabel(r'$E_\gamma$ (MeV)')
    plt.ylabel(r'$E^2\,dN_\gamma/dE_\gamma$ (MeV s$^{-1}$)')
    plt.grid(True, which='both', linestyle='--')
    plt.legend()
    plt.title("Custom Equation — Total $E^2 dN/dE$")
    plt.tight_layout()
    plt.show()

    # ---- FIGURE C: Mass histogram (counts) + SMOOTH analytic PDF scaled to counts ----
    def log_edges(a, b, k):  # redefined for local clarity
        return np.logspace(np.log10(a), np.log10(b), k + 1)
    edges = log_edges(masses[0], masses[-1], N_BINS)
    plt.figure(figsize=(10, 6))
    # Blue = sampled counts per (log) bin
    plt.hist(samples, bins=edges, density=False, alpha=0.6, edgecolor='k',
             label=f"Sampled counts per bin (N={N})")

    # Orange = smooth line proportional to expected counts/bin for log bins:
    # expected counts in a narrow log bin: N * pdf(m) * m * d(ln m)
    dln = (np.log(masses[-1]) - np.log(masses[0])) / N_BINS
    counts_line = N * pdf * m_grid * dln
    plt.plot(m_grid, counts_line, lw=2.5, label="Analytic PDF (scaled to counts)")
    plt.xscale("log")
    plt.xlabel("Mass m (g)")
    plt.ylabel("Count per bin")
    plt.title("Custom Equation — Mass Histogram (counts) + Smooth PDF overlay")
    plt.grid(True, which='both', linestyle='--', alpha=0.5)
    plt.legend()
    plt.tight_layout()
    plt.show()

    # ---- Save exactly 3 files for custom: equation, mass distribution, distributed spectrum ----
    try:
        sv = user_input("\nSave this custom spectrum? (y/n): ",
                        allow_back=True, allow_exit=True).strip().lower()
    except BackRequested:
        return
    if sv in ('y', 'yes'):
        median_mass = float(np.median(samples)) if samples.size else 0.0
        folder = f"{median_mass:.2e}_custom_eq"
        outdir = os.path.join(CUSTOM_RESULTS_DIR, folder)
        base = outdir; k = 1
        while os.path.exists(outdir):
            outdir = f"{base}_{k}"; k += 1
        os.makedirs(outdir, exist_ok=True)

        with open(os.path.join(outdir, "equation.txt"), "w", encoding="utf-8") as fh:
            if user_vars:
                fh.write("# Variables:\n")
                for kname, kval in user_vars.items():
                    fh.write(f"# {kname} = {kval:.10e}\n")
            fh.write(expr + "\n")
        np.savetxt(os.path.join(outdir, "samples_sorted.txt"), samples,
                   header="Simulated masses (g), sorted ascending", fmt="%.12e")
        np.savetxt(os.path.join(outdir, "distributed_spectrum.txt"),
                   np.column_stack((E_grid, avg_tot)),
                   header="E_gamma(MeV) TotalSpectrum", fmt="%.10e")
        print(f"Saved → {outdir}")

# ---------------------------
# Helper used by "View previous spectra → option 1"
# ---------------------------
def generate_monochromatic_for_mass(mass_g, data_dir, out_dir):
    """
    Save a *pre-rendered* monochromatic total spectrum for the nearest available mass.
    Returns the filepath to the saved spectrum.
    """
    masses, names = discover_mass_folders(data_dir)
    if not masses:
        raise RuntimeError(f"No valid mass folders found under: {data_dir}")

    snap = snap_to_available(mass_g, masses, tol=1e-12)
    if snap is None:
        idx = int(np.clip(np.searchsorted(masses, mass_g), 1, len(masses) - 1))
        lo = masses[idx - 1]; hi = masses[idx]
        snap = lo if abs(lo - mass_g) <= abs(hi - mass_g) else hi
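        # Nearest-neighbour fallback (used only when snap_to_available finds no
        # match within tol): np.searchsorted brackets the request between two
        # tabulated masses and the closer one wins. E.g. with a hypothetical
        # table [1e15, 1e16, 1e17] g, a request of 3e15 g snaps to 1e15 g.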

    i = np.where(np.isclose(masses, snap, rtol=0, atol=0))[0][0]
    sub = os.path.join(data_dir, names[i])
    S = load_spectra_components(sub)

    E = S['energy_primary']
    tot = (S['direct_gamma_primary']
           + np.interp(E, S['energy_secondary'], S['direct_gamma_secondary'], left=0, right=0)
           + S['IFA_primary'] + np.interp(E, S['energy_secondary'], S['IFA_secondary'], left=0, right=0)
           + S['FSR_primary'] + np.interp(E, S['energy_secondary'], S['FSR_secondary'], left=0, right=0))
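    # Total photon rate assembled on the primary energy grid: direct Hawking
    # emission plus the IFA and FSR components (presumably in-flight annihilation
    # and final-state radiation, following the table naming); pieces tabulated on
    # the secondary grid are interpolated onto E and zeroed outside their range.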

    os.makedirs(out_dir, exist_ok=True)
    fname = os.path.join(out_dir, f"{snap:.2e}_spectrum.txt")
    np.savetxt(fname, np.column_stack((E, tot)),
               header="E_gamma(MeV) Total (MeV^-1 s^-1)", fmt="%.10e")
    return fname
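
# Example usage (hypothetical mass value), mirroring the call in view_previous_spectra():
#   path = generate_monochromatic_for_mass(1e16, DATA_DIR, MONO_RESULTS_DIR)
#   E, tot = np.loadtxt(path, skiprows=1).T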

# ---------------------------
# View previous spectra (handles 'b' correctly)
# ---------------------------
def view_previous_spectra():
    cat_map = {
        '2': (GAUSSIAN_METHOD, GAUSS_RESULTS_DIR),
        '3': (NON_GAUSSIAN_METHOD, NGAUSS_RESULTS_DIR),
        '4': (LOGNORMAL_METHOD, LOGN_RESULTS_DIR),
    }
    sels = []
    while True:
        print("\nView Previous — choose:")
        print(" 1: Monochromatic Distribution")
        print(" 2: Gaussian collapse")
        print(" 3: Non-Gaussian collapse")
        print(" 4: Log-Normal distribution")
        print(" 0: Plot all Queued | b: Back | q: Quit")
        try:
            c = user_input("Choice: ", allow_back=True, allow_exit=True).strip().lower()
        except BackRequested:
            return

        if c == '1':
            try:
                masses = user_input("Enter mass(es) to queue (comma-separated): ",
                                    allow_back=True, allow_exit=True)
            except BackRequested:
                continue
            for mstr in masses.split(','):
                try:
                    mval = float(mstr)
                    path = generate_monochromatic_for_mass(mval, DATA_DIR, MONO_RESULTS_DIR)
                    data = np.loadtxt(path, skiprows=1)
                    E, tot = data[:, 0], data[:, -1]
                    sels.append((f"Mono {mval:.2e}", E, tot))
                    print(f"Queued Mono {mval:.2e}")
                except Exception as e:
                    warn(f"Skipping token '{mstr.strip()}': {e}")
            continue

        if c in cat_map:
            lbl, base = cat_map[c]
            runs = list_saved_runs(base)
            if not runs:
                print("None saved yet.")
                continue
            print(f"Available in {lbl}:")
            for i, run in enumerate(runs, 1):
                print(f" {i}: {run}")
            try:
                picks = user_input("Enter indices to queue (comma-separated): ",
                                   allow_back=True, allow_exit=True)
            except BackRequested:
                continue
            for idx in picks.split(','):
                try:
                    run = runs[int(idx) - 1]
                    fn = os.path.join(base, run, "distributed_spectrum.txt")
                    data = np.loadtxt(fn, skiprows=1)
                    E, tot = data[:, 0], data[:, 1]
                    sels.append((f"{lbl} {run}", E, tot))
                    print(f"Queued {lbl} {run}")
                except Exception as e:
                    warn(f"Skipping selection '{idx.strip()}': {e}")
            continue

        if c == '0':
            break

        print("Invalid choice.")

    if not sels:
        print("None queued.")
        return

    peaks_dn = [sp.max() for _, _, sp in sels if sp.size]
    ymax1 = max(peaks_dn)*10 if peaks_dn else 1
    ymin1 = max(min(peaks_dn)/1e3, 1e-299) if peaks_dn else 1e-299

    peaks_e2 = [(E**2 * sp).max() for _, E, sp in sels if sp.size]
    ymax2 = max(peaks_e2)*10 if peaks_e2 else 1
    ymin2 = max(min(peaks_e2)/1e3, 1e-299) if peaks_e2 else 1e-299
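    # Axis heuristic: plot from three decades below the weakest curve's peak up
    # to one decade above the strongest; the 1e-299 floor only keeps the
    # log-scale lower limit finite if a queued spectrum is vanishingly small.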

    figA = plt.figure(figsize=(10, 7))
    for name, E, sp in sels:
        m = (sp > 0)
        plt.plot(E[m], sp[m], lw=2, label=name)
    plt.xscale('log'); plt.yscale('log')
    plt.xlabel(r'$E_\gamma$ (MeV)'); plt.ylabel(r'$dN_\gamma/dE_\gamma$')
    plt.ylim(ymin1, ymax1); plt.xlim(0.5, 5e3)
    plt.grid(True, which='both', linestyle='--'); plt.legend()
    plt.title("Comparison: dN/dE"); plt.tight_layout(); plt.show(); plt.close(figA)

    figB = plt.figure(figsize=(10, 7))
    for name, E, sp in sels:
        m = (sp > 0)
        plt.plot(E[m], E[m]**2 * sp[m], lw=2, label=name)
    plt.xscale('log'); plt.yscale('log')
    plt.xlabel(r'$E_\gamma$ (MeV)'); plt.ylabel(r'$E^2\,dN_\gamma/dE_\gamma$')
    plt.ylim(ymin2, ymax2); plt.xlim(0.5, 5e3)
    plt.grid(True, which='both', linestyle='--'); plt.legend()
    plt.title("Comparison: $E^2$ dN/dE"); plt.tight_layout(); plt.show(); plt.close(figB)

# ---------------------------
# UI
# ---------------------------
def show_start_screen():
    print("\n" + Fore.CYAN + Style.BRIGHT + "╔════════════════════════════════════════════════════════╗")
    print(Fore.CYAN + Style.BRIGHT + "║ GammaPBHPlotter: PBH Spectrum Tool ║")
    print(Fore.CYAN + Style.BRIGHT + "║ Version 1.1.0 ║")
    print(Fore.CYAN + Style.BRIGHT + "╚════════════════════════════════════════════════════════╝")
    print()
    print("Analyze and visualize Hawking radiation spectra of primordial black holes.\n")
    print(Fore.YELLOW + "📄 Associated Publication:" + Style.RESET_ALL)
    print(" John Carlini & Ilias Cholis — Particle Astrophysics Research\n")
    print("At any prompt: 'b' = back, 'q' = quit.")

def main():
    show_start_screen()
    while True:
        print("\nSelect:")
        print("1: Monochromatic spectra")
        print(f"2: Distributed spectra ({GAUSSIAN_METHOD})")
        print(f"3: Distributed spectra ({NON_GAUSSIAN_METHOD})")
        print(f"4: Distributed spectra ({LOGNORMAL_METHOD})")
        print("5: Distributed spectra (Custom mass PDF)")
        print("6: View previous spectra")
        print("0: Exit")
        choice = user_input("Choice: ", allow_back=False, allow_exit=True).strip().lower()
        if choice == '1':
            monochromatic_spectra()
        elif choice == '2':
            distributed_spectrum(GAUSSIAN_METHOD)
        elif choice == '3':
            distributed_spectrum(NON_GAUSSIAN_METHOD)
        elif choice == '4':
            distributed_spectrum(LOGNORMAL_METHOD)
        elif choice == '5':
            custom_equation_pdf_tool()
        elif choice == '6':
            view_previous_spectra()
        elif choice in ['0', 'exit', 'q']:
            print("Goodbye."); break
        else:
            print("Invalid; try again.")

if __name__ == '__main__':
    try:
        main()
    except BackRequested:
        pass
    except Exception:
        import traceback
        traceback.print_exc()
        try:
            input("\nAn error occurred. Press Enter to exit…")
        except Exception:
            pass