xarpes 0.2.4__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
xarpes/functions.py CHANGED
@@ -1,88 +1,253 @@
1
- # Copyright (C) 2024 xARPES Developers
1
+ # Copyright (C) 2025 xARPES Developers
2
2
  # This program is free software under the terms of the GNU GPLv3 license.
3
3
 
4
4
  """Separate functions mostly used in conjunction with various classes."""
5
5
 
6
6
  import numpy as np
7
7
 
8
- def error_function(p, xdata, ydata, function, extra_args):
8
def resolve_param_name(params, label, pname):
    """
    Locate the lmfit parameter key that pairs a component `label` with a bare
    parameter name `pname` (e.g. 'amplitude', 'peak', 'broadening'),
    tolerating the common token separators. Returns the key, or None if no
    match can be identified.
    """
    import re

    all_names = list(params.keys())

    # Direct joins of label and pname with the typical separators.
    for joined in (f"{pname}_{label}", f"{label}_{pname}",
                   f"{pname}:{label}", f"{label}:{pname}",
                   f"{label}.{pname}", f"{label}|{pname}",
                   f"{label}-{pname}", f"{pname}-{label}"):
        if joined in params:
            return joined

    # Regex fallback: label and pname appearing as separated tokens, label
    # first, in any longer key.
    label_re = re.escape(str(label))
    pname_re = re.escape(str(pname))
    sep = r"[.:/_\-]"  # common separators
    pattern = re.compile(
        rf"(^|{sep}){label_re}({sep}|$).*({sep}){pname_re}({sep}|$)")
    for candidate in all_names:
        if pattern.search(candidate):
            return candidate

    # Last resort: a unique key ending in pname that also mentions the label.
    tail_matches = [n for n in all_names
                    if n.endswith(pname) and str(label) in n]
    if len(tail_matches) == 1:
        return tail_matches[0]

    # No unambiguous match found.
    return None
43
+
44
+
45
def build_distributions(distributions, parameters):
    r"""Update distribution objects in place from fitted lmfit parameters.

    For every distribution, each attribute associated with its class is set
    from the parameter named ``'<attribute>_<label>'``. Distributions with an
    unrecognized ``class_name`` are left untouched.
    """
    # Attributes carried by each supported distribution class.
    attributes_by_class = {
        'Constant': ('offset',),
        'Linear': ('offset', 'slope'),
        'SpectralLinear': ('amplitude', 'peak', 'broadening'),
        'SpectralQuadratic': ('amplitude', 'peak', 'broadening'),
    }

    for dist in distributions:
        for attribute in attributes_by_class.get(dist.class_name, ()):
            key = attribute + '_' + dist.label
            setattr(dist, attribute, parameters[key].value)

    return distributions
63
+
64
+
65
def construct_parameters(distribution_list, matrix_args=None):
    r"""Build an lmfit Parameters object from a list of distributions.

    One parameter named ``'<attribute>_<label>'`` is added per distribution
    attribute; amplitudes and broadenings are constrained to be non-negative.
    When ``matrix_args`` is given, its key/value pairs are added as extra
    parameters and the list of their names is returned alongside.
    """
    from lmfit import Parameters

    parameters = Parameters()

    for dist in distribution_list:
        kind = dist.class_name
        if kind == 'Constant':
            parameters.add(name='offset_' + dist.label, value=dist.offset)
        elif kind == 'Linear':
            parameters.add(name='offset_' + dist.label, value=dist.offset)
            parameters.add(name='slope_' + dist.label, value=dist.slope)
        elif kind in ('SpectralLinear', 'SpectralQuadratic'):
            # Both spectral classes expose the same three parameters.
            parameters.add(name='amplitude_' + dist.label,
                           value=dist.amplitude, min=0)
            parameters.add(name='peak_' + dist.label, value=dist.peak)
            parameters.add(name='broadening_' + dist.label,
                           value=dist.broadening, min=0)

    if matrix_args is None:
        return parameters

    element_names = []
    for key, value in matrix_args.items():
        parameters.add(name=key, value=value)
        element_names.append(key)
    return parameters, element_names
99
+
100
+
101
def residual(parameters, xdata, ydata, angle_resolution, new_distributions,
             kinetic_energy, hnuminPhi, matrix_element=None,
             element_names=None):
    r"""Residual between the convolved model and the measured data.

    Parameters
    ----------
    parameters : lmfit.Parameters
        Current parameter values during the optimization.
    xdata : ndarray
        Abscissa values (angles) the model is evaluated on.
    ydata : ndarray
        Measured values the model is compared to.
    angle_resolution : float
        Instrumental angular resolution used for the Gaussian convolution.
    new_distributions : list
        Distribution objects whose attributes are refreshed from
        `parameters` before evaluation.
    kinetic_energy : float
        Kinetic energy passed to 'SpectralQuadratic' distributions.
    hnuminPhi : float
        Photon energy minus work function, passed to 'SpectralQuadratic'
        distributions.
    matrix_element : callable or None, optional
        When given, multiplies the contribution of Dispersion-type
        distributions.
    element_names : list of str or None, optional
        Names of the matrix-element parameters to extract from `parameters`.
        Required when `matrix_element` is given.

    Returns
    -------
    ndarray
        Difference between the convolved model and `ydata`.
    """
    from scipy.ndimage import gaussian_filter
    from xarpes.distributions import Dispersion

    # Collect only the matrix-element parameters that actually exist in the
    # parameter set; they are forwarded to `matrix_element` as keywords.
    if matrix_element is not None:
        matrix_parameters = {}
        for name in element_names:
            if name in parameters:
                matrix_parameters[name] = parameters[name].value

    # Push the current parameter values back into the distribution objects.
    new_distributions = build_distributions(new_distributions, parameters)

    # Evaluate on a padded grid so the convolution has no edge artifacts.
    extend, step, numb = extend_function(xdata, angle_resolution)

    model = np.zeros_like(extend)

    for dist in new_distributions:
        # `class_name` is preferred; fall back to the type name when a
        # distribution does not define it.
        if getattr(dist, 'class_name', type(dist).__name__) == \
                'SpectralQuadratic':
            part = dist.evaluate(extend, kinetic_energy, hnuminPhi)
        else:
            part = dist.evaluate(extend)

        # Matrix elements modulate only dispersive features.
        if (matrix_element is not None) and isinstance(dist, Dispersion):
            part *= matrix_element(extend, **matrix_parameters)

        model += part

    # Convolve with the resolution and crop the padding (numb == 0 means no
    # padding, hence the `None` end index).
    model = gaussian_filter(model, sigma=step)[numb:-numb if numb else None]
    return model - ydata
135
+
136
+
137
def extend_function(abscissa_range, abscissa_resolution):
    r"""Pad an equidistant abscissa grid for edge-safe Gaussian convolution.

    Returns the padded grid, the convolution sigma expressed in grid steps,
    and the number of padding points added on each side.
    """
    from .constants import FWHM2STD
    from . import settings_parameters as xprs

    spacing = np.abs(abscissa_range[1] - abscissa_range[0])
    # Resolution (FWHM) converted to a standard deviation in units of the
    # grid spacing.
    sigma = abscissa_resolution / (spacing * FWHM2STD)
    pad = int(xprs.sigma_extend * sigma)
    padded = np.linspace(abscissa_range[0] - pad * spacing,
                         abscissa_range[-1] + pad * spacing,
                         len(abscissa_range) + 2 * pad)
    return padded, sigma, pad
149
+
150
+
151
def error_function(p, xdata, ydata, function, resolution, yerr, extra_args):
    r"""The error function used inside the fit_leastsq function.

    Parameters
    ----------
    p : ndarray
        Array of parameters during the optimization.
    xdata : ndarray
        Abscissa values the function is evaluated on.
    ydata : ndarray
        Measured values to compare to.
    function : callable
        Function or class with __call__ method to evaluate.
    resolution : float or None
        When truthy, the model is evaluated on an extended grid and
        convolved with a Gaussian of this resolution.
    yerr : ndarray
        Standard deviations of ydata.
    extra_args : tuple
        Additional arguments passed to function.

    Returns
    -------
    residual : ndarray
        Normalized residuals between model and ydata.
    """
    from scipy.ndimage import gaussian_filter

    if not resolution:
        # No convolution requested: evaluate directly on the data grid.
        model = function(xdata, *p, *extra_args)
    else:
        # Evaluate on a padded grid, convolve, then crop the padding.
        grid, sigma, pad = extend_function(xdata, resolution)
        smoothed = gaussian_filter(function(grid, *p, *extra_args),
                                   sigma=sigma)
        model = smoothed[pad:-pad if pad else None]

    return (model - ydata) / yerr
31
188
 
32
- def fit_leastsq(p0, xdata, ydata, function, extra_args):
33
- r"""Wrapper arround scipy.optimize.leastsq.
189
+
190
def fit_leastsq(p0, xdata, ydata, function, resolution=None,
                yerr=None, *extra_args):
    r"""Wrapper around scipy.optimize.leastsq.

    Parameters
    ----------
    p0 : ndarray
        Initial guess for parameters to be optimized.
    xdata : ndarray
        Abscissa values the function is evaluated on.
    ydata : ndarray
        Measured values to compare to.
    function : callable
        Function or class with __call__ method to evaluate.
    resolution : float or None, optional
        Convolution resolution, forwarded to error_function.
    yerr : ndarray or None, optional
        Standard deviations of ydata. Defaults to ones if None.
    extra_args : tuple
        Additional arguments passed to the function.

    Returns
    -------
    pfit_leastsq : ndarray
        Optimized parameters.
    pcov : ndarray or float
        Scaled covariance matrix of the optimized parameters.
        If the covariance could not be estimated, returns np.inf.
    """
    from scipy.optimize import leastsq

    weights = np.ones_like(ydata) if yerr is None else yerr

    pfit, pcov, infodict, errmsg, success = leastsq(
        error_function,
        p0,
        args=(xdata, ydata, function, resolution, weights, extra_args),
        full_output=1,
    )

    dof = len(ydata) - len(p0)
    if dof > 0 and pcov is not None:
        # Scale the covariance by the reduced chi-square, as leastsq returns
        # the unscaled (fractional) covariance.
        chi_square = (
            error_function(pfit, xdata, ydata, function, resolution,
                           weights, extra_args) ** 2
        ).sum()
        pcov = pcov * (chi_square / dof)
    else:
        pcov = np.inf

    return pfit, pcov
76
241
 
77
- return pfit_leastsq, perr_leastsq
78
242
 
79
243
def download_examples():
    """Downloads the examples folder from the main xARPES repository only if it
    does not already exist in the current directory. Prints executed steps and a
    final cleanup/failure message.

    Returns
    -------
    0 or 1 : int
        Returns 0 if the execution succeeds, 1 if it fails.
    """
    import requests
    import zipfile
    import os
    import shutil
    import io
    import jupytext
    import tempfile
    import re

    # Main xARPES repo (examples live under /examples there)
    repo_url = "https://github.com/xARPES/xARPES"
    output_dir = "."  # Directory from which the function is called

    # Target 'examples' directory in the user's current location
    final_examples_path = os.path.join(output_dir, "examples")
    if os.path.exists(final_examples_path):
        print("Warning: 'examples' folder already exists. "
              "No download will be performed.")
        return 1  # Exit the function if 'examples' directory exists

    # --- Determine version from xarpes.__init__.__version__ -----------------
    try:
        # Import inside the function, avoiding circular imports at import time
        import xarpes as _xarpes
        raw_version = getattr(_xarpes, "__version__", None)
    except Exception as exc:
        print(f"Warning: could not import xarpes to determine version: {exc}")
        raw_version = None

    tag_version = None
    if raw_version is not None:
        raw_version = str(raw_version)
        # Strip dev/local suffixes so that '0.3.3.dev1' or '0.3.3+0.gHASH'
        # maps to the tag 'v0.3.3'. If you use plain '0.3.3' already, this is
        # a no-op.
        m = re.match(r"(\d+\.\d+\.\d+)", raw_version)
        if m:
            tag_version = m.group(1)
        else:
            tag_version = raw_version

        print(f"Determined xARPES version from __init__: {raw_version} "
              f"(using tag version '{tag_version}').")
    else:
        print("Warning: xarpes.__version__ is not defined; will skip "
              "tag-based download and try the main branch only.")

    # --- Build refs and use for-else to try them in order -------------------
    repo_parts = repo_url.replace("https://github.com/", "").rstrip("/")

    refs_to_try = []
    if tag_version is not None:
        refs_to_try.append(f"tags/v{tag_version}")  # version-matched examples
    refs_to_try.append("heads/main")  # fallback: latest examples

    response = None
    for ref in refs_to_try:
        zip_url = f"https://github.com/{repo_parts}/archive/refs/{ref}.zip"
        print(f"Attempting to download examples from '{ref}':\n  {zip_url}")
        response = requests.get(zip_url)

        if response.status_code == 200:
            if ref.startswith("tags/"):
                print(f"Successfully downloaded examples from tagged release "
                      f"'v{tag_version}'.")
            else:
                print("Tagged release not available; using latest examples "
                      "from the 'main' branch instead.")
            break
        else:
            print("Failed to download from this ref. HTTP status code: "
                  f"{response.status_code}")
    else:
        # for-else: only executed if we never hit 'break'
        print("Error: could not download examples from any ref "
              f"(tried: {', '.join(refs_to_try)}).")
        return 1

    # At this point, 'response' holds a successful download
    zip_file_bytes = io.BytesIO(response.content)

    # --- Extract into a temporary directory to avoid polluting CWD ----------
    with tempfile.TemporaryDirectory() as tmpdir:
        with zipfile.ZipFile(zip_file_bytes, "r") as zip_ref:
            zip_ref.extractall(tmpdir)
            # First member gives us the top-level directory in the archive,
            # typically something like 'xARPES-0.3.3/' or 'xARPES-main/'.
            first_member = zip_ref.namelist()[0]

        top_level_dir = first_member.split("/")[0]
        main_folder_path = os.path.join(tmpdir, top_level_dir)
        examples_path = os.path.join(main_folder_path, "examples")

        if not os.path.exists(examples_path):
            print("Error: downloaded archive does not contain an 'examples' "
                  "directory.")
            return 1

        # Move the 'examples' directory to the target location in the CWD
        shutil.move(examples_path, final_examples_path)
        print(f"'examples' subdirectory moved to {final_examples_path}")

    # Convert all .Rmd files in the examples directory to .ipynb
    # and delete the .Rmd files
    for dirpath, dirnames, filenames in os.walk(final_examples_path):
        for filename in filenames:
            if filename.endswith(".Rmd"):
                full_path = os.path.join(dirpath, filename)
                # Replace only the file extension. A plain
                # str.replace(".Rmd", ".ipynb") would also rewrite an
                # '.Rmd' occurring earlier in the path.
                ipynb_path = os.path.splitext(full_path)[0] + ".ipynb"
                jupytext.write(jupytext.read(full_path), ipynb_path)
                os.remove(full_path)  # Deletes .Rmd file afterwards
                print(f"Converted and deleted {full_path}")

    # Temporary directory is cleaned up automatically
    print("Cleaned up temporary files.")
    return 0
371
+
372
+
373
def set_script_dir():
    r"""This function sets the directory such that the xARPES code can be
    executed either inside IPython environments or as .py scripts from
    arbitrary locations.

    Returns
    -------
    script_dir : str
        Directory of the calling script, or the current working directory
        when running inside IPython or when the caller's file cannot be
        determined.
    """
    import os
    import inspect

    # Inside IPython, get_ipython() exists and the notebook's working
    # directory is the natural anchor.
    try:
        cfg = get_ipython().config  # noqa: F821 - defined only in IPython
        return os.getcwd()
    except NameError:
        pass

    # Plain Python: use the caller's file location. The original code placed
    # this in the `except NameError` handler with a trailing bare `except:`
    # on the same try, which could never catch failures raised *inside* the
    # handler (e.g. module is None, or __file__ missing in frozen/embedded
    # interpreters) -- so the documented fallback never fired. Handle those
    # failures explicitly here.
    try:
        frame = inspect.stack()[1]
        module = inspect.getmodule(frame[0])
        script_dir = os.path.dirname(os.path.abspath(module.__file__))
    except (AttributeError, TypeError):
        # If __file__ isn't defined, fall back to current working directory
        script_dir = os.getcwd()

    return script_dir
394
+
395
+
396
def MEM_core(dvec, model_in, uvec, mu, alpha, wvec, V_Sigma, U,
             t_criterion, iter_max):
    r"""
    Implementation of Bryan's algorithm (not to be confused with Bryan's
    'method' for determining the Lagrange multiplier alpha. For details, see
    Eur. Biophys. J. 18, 165 (1990).

    Parameters
    ----------
    dvec : ndarray
        Data vector the reconstructed spectrum is fitted to.
    model_in : ndarray
        Default model; the spectrum is parameterized as
        model_in * exp(U @ uvec) (Eq. 9).
    uvec : ndarray
        Coefficient vector in the singular space; updated in place and
        returned.
    mu : float
        Levenberg-Marquardt-style damping added to alpha in the linear solve.
    alpha : float
        Entropy regularization (Lagrange multiplier).
    wvec : ndarray
        Data weights (e.g. inverse variances).
    V_Sigma : ndarray
        Product V * Sigma from the truncated SVD of the kernel.
    U : ndarray
        Left factor of the truncated SVD (columns span the singular space).
    t_criterion : float
        Convergence threshold for the stationarity measure `tcon`.
    iter_max : int
        Maximum number of iterations; a RuntimeWarning is issued when the
        loop stops without converging.

    Returns
    -------
    spectrum_in : ndarray
        Reconstructed spectrum after the final iteration.
    uvec : ndarray
        Final singular-space coefficients.
    """
    import numpy as np
    import warnings

    spectrum_in = model_in * np.exp(U @ uvec)  # Eq. 9
    alphamu = alpha + mu

    converged = False
    iter_count = 0
    while not converged and iter_count < iter_max:

        T = V_Sigma @ (U.T @ spectrum_in)  # Below Eq. 7
        gvec = V_Sigma.T @ (wvec * (T - dvec))  # Eq. 10
        M = V_Sigma.T @ (wvec[:, None] * V_Sigma)  # Above Eq. 11
        K = U.T @ (spectrum_in[:, None] * U)  # Above Eq. 11

        # Diagonalize K, then the symmetrized product, to solve the damped
        # Newton step in the singular space without forming inverses.
        xi, P = np.linalg.eigh(K)  # Eq. 13
        sqrt_xi = np.sqrt(xi)
        P_sqrt_xi = P * sqrt_xi[None, :]
        A = P_sqrt_xi.T @ (M @ P_sqrt_xi)  # Between Eqs. 13 and 14
        Lambda, R = np.linalg.eigh(A)  # Eq. 14
        Y_inv = R.T @ (sqrt_xi[:, None] * P.T)  # Below Eq. 15

        # From Eq. 16:
        Y_inv_du = -(Y_inv @ (alpha * uvec + gvec)) / (alphamu + Lambda)
        d_uvec = (
            -alpha * uvec - gvec - M @ (Y_inv.T @ Y_inv_du)
        ) / alphamu  # Eq. 20

        uvec += d_uvec
        spectrum_in = model_in * np.exp(U @ uvec)  # Eq. 9

        # Convergence block: Section 2.3
        alpha_K_u = alpha * (K @ uvec)  # Skipping the minus sign twice
        K_g = K @ gvec
        tcon = (
            2 * np.linalg.norm(alpha_K_u + K_g)**2
            / (np.linalg.norm(alpha_K_u) + np.linalg.norm(K_g))**2
        )
        converged = (tcon < t_criterion)

        iter_count += 1

    if not converged:
        # Force the warning to be shown even if RuntimeWarnings are filtered.
        with warnings.catch_warnings():
            warnings.simplefilter("always", RuntimeWarning)
            warnings.warn(
                f"MEM_core did not converge within iter_max={iter_max} "
                f"(performed {iter_count} iterations).",
                category=RuntimeWarning,
                stacklevel=2,
            )

    return spectrum_in, uvec
456
+
457
+
458
def bose_einstein(omega, k_BT):
    """Bose-Einstein distribution n_B(omega) for k_BT > 0 and omega >= 0."""
    # Largest x for which exp(x) is finite in float64 (~709.78).
    overflow_limit = np.log(np.finfo(float).max)

    ratio = omega / k_BT
    result = np.empty_like(omega, dtype=float)

    # omega == 0: the occupation diverges.
    zero_mask = omega == 0
    result[zero_mask] = np.inf

    # Arguments that would overflow exp(): the occupation underflows to 0.
    huge_mask = (ratio > overflow_limit) & ~zero_mask
    result[huge_mask] = 0.0

    # Everything else: expm1 keeps precision for small ratios.
    regular_mask = ~zero_mask & ~huge_mask
    result[regular_mask] = 1.0 / np.expm1(ratio[regular_mask])

    return result
479
+
480
+
481
def fermi(omega, k_BT):
    """Fermi-Dirac distribution f(omega) for k_BT > 0 and omega >= 0.
    Could potentially be made a core block of the FermiDirac distribution."""
    # Largest x for which exp(x) is finite in float64 (~709.78).
    overflow_limit = np.log(np.finfo(float).max)

    ratio = omega / k_BT
    result = np.empty_like(omega, dtype=float)

    # Overflow-safe zero for very large arguments.
    big = ratio > overflow_limit
    result[big] = 0.0

    # Stable form exp(-x) / (1 + exp(-x)) avoids overflow for x >= 0.
    rest = ~big
    damped = np.exp(-ratio[rest])
    result[rest] = damped / (1.0 + damped)

    return result
497
+
498
+
499
def create_kernel_function(enel, omega, k_BT):
    r"""Kernel function. Eq. 17 from https://arxiv.org/abs/2508.13845.

    Returns
    -------
    K : ndarray, complex
        Shape (enel.size, omega.size) if enel and omega are 1D.
    """
    from scipy.special import digamma

    # Broadcast energies against frequencies: rows are enel, columns omega.
    energies = enel[:, None]   # (Ne, 1)
    frequencies = omega[None, :]  # (1, Nw)

    thermal_scale = 2.0 * np.pi * k_BT

    occupation_term = bose_einstein(frequencies, k_BT) + 0.5
    kernel = (digamma(0.5 - 1j * (energies - frequencies) / thermal_scale)
              - digamma(0.5 - 1j * (energies + frequencies) / thermal_scale)
              - 2j * np.pi * occupation_term)

    return kernel
519
+
520
+
521
def singular_value_decomposition(kernel, sigma_svd):
    r"""
    Some papers use kernel = U Sigma V^T; we follow Bryan's algorithm.
    Singular values below `sigma_svd` are discarded and all factors are
    truncated to the retained rank.
    """
    left, singular_values, right_transpose = np.linalg.svd(kernel)
    right = right_transpose.T

    # Keep only significant singular values (they are sorted descending).
    singular_values = singular_values[singular_values > sigma_svd]
    rank_kept = singular_values.size
    left = left[:, :rank_kept]
    right = right[:, :rank_kept]
    V_Sigma = left * singular_values[None, :]

    # Initial singular-space coefficient vector.
    uvec = np.zeros(rank_kept)

    print('Dimensionality has been reduced from a matrix of rank ' + str(min(kernel.shape)) +
          ' to ' + str(int(rank_kept)) + ' in the singular space.')

    return V_Sigma, right, uvec
539
+
540
+
541
def create_model_function(omega, omega_I, omega_M, omega_S, h_n):
    r"""Piecewise model m_n(omega) defined on the omega grid.

    The model rises quadratically from omega.min(), smoothly reaches a
    plateau of height h_n between omega_I and omega_M, and smoothly decays
    back towards omega.max(). omega_min/max are taken as
    omega.min()/omega.max().

    Parameters
    ----------
    omega : ndarray
        Frequency grid (assumed sorted, but only min/max are used).
    omega_I : float
        ω_n^I, end of the rising region (must be > 0).
    omega_M : float
        ω_n^M, end of the plateau.
    omega_S : float
        ω_n^S, shift entering the decay width.
    h_n : float
        h_n in the prefactor m_n(omega) = 2 h_n * ( ... ).

    Returns
    -------
    model : ndarray
        m_n(omega) evaluated on the omega grid.

    Raises
    ------
    ValueError
        If omega_I <= 0 or the decay denominator vanishes.
    """
    w_min = omega.min()
    w_max = omega.max()

    if omega_I <= 0:
        raise ValueError("omega_I must be > 0.")
    denom = w_max + omega_S - omega_M
    if denom == 0:
        raise ValueError("omega_max + omega_S - omega_M must be nonzero.")

    w_I_half = 0.5 * omega_I
    w_mid = 0.5 * (w_max + omega_S + omega_M)

    # Start from zeros rather than np.empty_like: if the breakpoints are not
    # ordered (w_I_half <= omega_I <= omega_M <= w_mid <= w_max), some grid
    # points fall in no branch and would otherwise keep uninitialized memory.
    domains = np.zeros_like(omega, dtype=float)

    # Quadratic rise from the lower edge.
    m1 = (omega >= w_min) & (omega < w_I_half)
    domains[m1] = (omega[m1] / omega_I) ** 2

    # Smooth approach to the plateau.
    m2 = (omega >= w_I_half) & (omega < omega_I)
    domains[m2] = 0.5 - (omega[m2] / omega_I - 1.0) ** 2

    # Plateau.
    m3 = (omega >= omega_I) & (omega < omega_M)
    domains[m3] = 0.5

    # Smooth departure from the plateau.
    m4 = (omega >= omega_M) & (omega < w_mid)
    domains[m4] = 0.5 - ((omega[m4] - omega_M) / denom) ** 2

    # Quadratic decay towards the upper edge.
    m5 = (omega >= w_mid) & (omega <= w_max)
    domains[m5] = ((omega[m5] - omega_M) / denom - 1.0) ** 2

    return 2.0 * h_n * domains
595
+
596
+
597
def chi2kink_logistic(x, a, b, c, d):
    """Four-parameter logistic (scaled sigmoid), evaluated stably.

    Parameters
    ----------
    x : array_like
        Input values.
    a : float
        Lower asymptote.
    b : float
        Amplitude (upper - lower).
    c : float
        Midpoint (inflection point).
    d : float
        Slope parameter (steepness).

    Returns
    -------
    phi : ndarray
        Logistic curve evaluated at x.
    """
    scaled = d * (x - c)

    # Start from the lower asymptote and add the sigmoid contribution,
    # using the exp form that cannot overflow on each branch.
    phi = np.full_like(scaled, a, dtype=float)

    upper_branch = scaled >= 0
    phi[upper_branch] += b / (1.0 + np.exp(-scaled[upper_branch]))

    lower_branch = ~upper_branch
    grown = np.exp(scaled[lower_branch])
    phi[lower_branch] += b * grown / (1.0 + grown)

    return phi