xarpes 0.6.3__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
xarpes/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.6.3"
+ __version__ = "0.6.4"

  from importlib import import_module

xarpes/selfenergies.py CHANGED
@@ -1175,47 +1175,52 @@ class SelfEnergy:
  return fig, spectrum, model, omega_range, alpha_select


- def bayesian_loop(self, *, omega_min, omega_max, omega_num, omega_I,
- omega_M, fermi_velocity=None,
- fermi_wavevector=None, bare_mass=None, vary=(),
- opt_method="Nelder-Mead", opt_options=None,
- mem=None, loop=None, **mem_kwargs):
+ def bayesian_loop(self, *, omega_min, omega_max, omega_num, omega_I, omega_M,
+ fermi_velocity=None, fermi_wavevector=None, bare_mass=None,
+ vary=(), opt_method="Nelder-Mead", opt_options=None,
+ mem=None, loop=None, print_lines=None, **mem_kwargs):
  r"""
  Bayesian outer loop calling `_cost_function()`.

- If `vary` is non-empty, runs a SciPy optimization over the selected
- parameters in `vary`.
-
- Supported entries in `vary` depend on `self._class`:
-
- - Common: "fermi_wavevector", "impurity_magnitude", "lambda_el", "h_n"
- - SpectralLinear: additionally "fermi_velocity"
- - SpectralQuadratic: additionally "bare_mass"
-
- Notes
- -----
- **Convergence behaviour**
-
- By default, convergence is controlled by a *custom patience criterion*:
- the optimization terminates when the absolute difference between the
- current cost and the best cost seen so far is smaller than `tole` for
- `converge_iters` consecutive iterations.
-
- To instead rely on SciPy's native convergence criteria (e.g. Nelder–Mead
- `xatol` / `fatol`), disable the custom criterion by setting
- `converge_iters=0` or `tole=None`. In that case, SciPy termination options
- supplied via `opt_options` are used.
-
  Parameters
  ----------
- opt_options : dict, optional
- Options passed directly to `scipy.optimize.minimize`. These are only
- used for convergence if the custom criterion is disabled (see Notes).
+ print_lines : int or None, optional
+ If an integer, prints the first/last `print_lines` iteration lines.
+ If None, prints all lines as usual.
  """
+ return self._bayesian_loop_core(
+ omega_min=omega_min,
+ omega_max=omega_max,
+ omega_num=omega_num,
+ omega_I=omega_I,
+ omega_M=omega_M,
+ fermi_velocity=fermi_velocity,
+ fermi_wavevector=fermi_wavevector,
+ bare_mass=bare_mass,
+ vary=vary,
+ opt_method=opt_method,
+ opt_options=opt_options,
+ mem=mem,
+ loop=loop,
+ print_lines=print_lines,
+ **mem_kwargs,
+ )
+
+
+ def _bayesian_loop_core(self, *, omega_min, omega_max, omega_num, omega_I,
+ omega_M, fermi_velocity=None, fermi_wavevector=None,
+ bare_mass=None, vary=(), opt_method="Nelder-Mead",
+ opt_options=None, mem=None, loop=None,
+ print_lines=None, **mem_kwargs):
+ r"""
+ Core implementation of `bayesian_loop` (prints controlled by print_lines).

+ See `bayesian_loop` for API documentation.
+ """
  fermi_velocity, fermi_wavevector, bare_mass = self._prepare_bare(
- fermi_velocity, fermi_wavevector, bare_mass)
-
+ fermi_velocity, fermi_wavevector, bare_mass
+ )
+
  vary = tuple(vary) if vary is not None else ()

  allowed = {"fermi_wavevector", "impurity_magnitude", "lambda_el", "h_n"}
@@ -1226,16 +1231,17 @@ class SelfEnergy:
  allowed.add("bare_mass")
  else:
  raise NotImplementedError(
- f"bayesian_loop does not support spectral class '{self._class}'."
+ "bayesian_loop does not support spectral class "
+ f"'{self._class}'."
  )
-
+
  unknown = set(vary).difference(allowed)
  if unknown:
  raise ValueError(
  f"Unsupported entries in vary: {sorted(unknown)}. "
  f"Allowed: {sorted(allowed)}."
  )
-
+
  omega_num = int(omega_num)
  if omega_num < 2:
  raise ValueError("omega_num must be an integer >= 2.")
@@ -1301,21 +1307,15 @@ class SelfEnergy:
  f"Initial h_n ({h_n0:g}) must be >= h_n_min ({h_n_min:g})."
  )
  if kF0 is None:
- raise ValueError(
- "bayesian_loop requires an initial fermi_wavevector."
- )
+ raise ValueError("bayesian_loop requires an initial fermi_wavevector.")
  if self._class == "SpectralLinear" and vF0 is None:
- raise ValueError(
- "bayesian_loop requires an initial fermi_velocity."
- )
+ raise ValueError("bayesian_loop requires an initial fermi_velocity.")
  if self._class == "SpectralQuadratic" and mb0 is None:
  raise ValueError("bayesian_loop requires an initial bare_mass.")
-
+
  from scipy.optimize import minimize
+ from collections import deque
  from . import create_kernel_function, singular_value_decomposition
-
- ecut_left = float(mem_cfg["ecut_left"])
- ecut_right = mem_cfg["ecut_right"]

  ecut_left_eV = ecut_left / KILO
  if ecut_right is None:
@@ -1346,10 +1346,14 @@ class SelfEnergy:
  kernel_used = np.concatenate((np.real(kernel_raw), -np.imag(kernel_raw)))
  elif parts == "real":
  kernel_used = np.real(kernel_raw)
- else: # parts == "imag"
+ else:
  kernel_used = -np.imag(kernel_raw)

- V_Sigma, U, uvec0 = singular_value_decomposition(kernel_used, sigma_svd)
+ # Try to silence SVD diagnostics during trimmed-output runs, if supported.
+ svd_verbose = (print_lines is None)
+ V_Sigma, U, uvec0 = singular_value_decomposition(
+ kernel_used, sigma_svd
+ )

  _precomp = {
  "omega_range": omega_range,
@@ -1361,43 +1365,47 @@ class SelfEnergy:
  "ecut_left": ecut_left,
  "ecut_right": ecut_right,
  }
-
+
  def _reflect_min(xi, p0, p_min, scale):
  """Map R -> [p_min, +inf) using linear reflection around p_min."""
  return p_min + np.abs((float(p0) - p_min) + scale * float(xi))

  def _unpack_params(x):
  params = {}
-
  i = 0
  for name in vary:
  xi = float(x[i])

  if name == "fermi_velocity":
  if vF0 is None:
- raise ValueError("Cannot vary fermi_velocity: no "
- "initial vF provided.")
+ raise ValueError(
+ "Cannot vary fermi_velocity: no initial vF provided."
+ )
  params["fermi_velocity"] = vF0 + scale_vF * xi

  elif name == "bare_mass":
  if mb0 is None:
- raise ValueError("Cannot vary bare_mass: no initial "
- "bare_mass provided.")
+ raise ValueError(
+ "Cannot vary bare_mass: no initial bare_mass provided."
+ )
  params["bare_mass"] = mb0 + scale_mb * xi

  elif name == "fermi_wavevector":
  if kF0 is None:
  raise ValueError(
- "Cannot vary fermi_wavevector: no initial kF "
- "provided."
+ "Cannot vary fermi_wavevector: no initial kF provided."
  )
  params["fermi_wavevector"] = kF0 + scale_kF * xi
-
+
  elif name == "impurity_magnitude":
- params["impurity_magnitude"] = _reflect_min(xi, imp0, 0.0, scale_imp)
+ params["impurity_magnitude"] = _reflect_min(
+ xi, imp0, 0.0, scale_imp
+ )

  elif name == "lambda_el":
- params["lambda_el"] = _reflect_min(xi, lae0, 0.0, scale_lambda_el)
+ params["lambda_el"] = _reflect_min(
+ xi, lae0, 0.0, scale_lambda_el
+ )

  elif name == "h_n":
  params["h_n"] = _reflect_min(xi, h_n0, h_n_min, scale_hn)
@@ -1435,14 +1443,14 @@ class SelfEnergy:

  return self._cost_function(
  optimisation_parameters=optimisation_parameters,
- omega_min=omega_min, omega_max=omega_max, omega_num=omega_num,
- omega_I=omega_I, omega_M=omega_M, mem_cfg=mem_cfg,
- _precomp=_precomp
- )
-
- last = {"cost": None, "spectrum": None, "model": None, "alpha": None}
-
- iter_counter = {"n": 0}
+ omega_min=omega_min,
+ omega_max=omega_max,
+ omega_num=omega_num,
+ omega_I=omega_I,
+ omega_M=omega_M,
+ mem_cfg=mem_cfg,
+ _precomp=_precomp,
+ )

  class ConvergenceException(RuntimeError):
  """Raised when optimisation has converged successfully."""
@@ -1461,7 +1469,6 @@ class SelfEnergy:
  if converge_iters < 0:
  raise ValueError("converge_iters must be >= 0.")

- # Track best solution seen across all obj calls (not just last).
  best_global = {
  "x": None,
  "params": None,
@@ -1472,25 +1479,53 @@ class SelfEnergy:
  }

  history = []
-
- # Cache most recent evaluation so the callback can read a cost without
- # forcing an extra objective evaluation.
- last_x = {"x": None}
  last_cost = {"cost": None}
  initial_cost = {"cost": None}
-
  iter_counter = {"n": 0}

  def _clean_params(params):
  """Convert NumPy scalar values to plain Python scalars."""
  out = {}
  for key, val in params.items():
- if isinstance(val, np.generic):
- out[key] = float(val)
- else:
- out[key] = val
+ out[key] = float(val) if isinstance(val, np.generic) else val
  return out

+ # --- Deterministic iteration printing (bulletproof) ---
+ if print_lines is None:
+ n_print = None
+ else:
+ n_print = int(print_lines)
+ if n_print < 0:
+ raise ValueError("print_lines must be >= 0 or None.")
+
+ tail_buf = deque(maxlen=0 if n_print is None else n_print)
+ omitted = {"n": 0}
+
+ def _emit_iter_line(line):
+ if n_print is None:
+ print(line)
+ return
+ if n_print == 0:
+ return
+ if iter_counter["n"] <= n_print:
+ print(line)
+ else:
+ tail_buf.append(line)
+ omitted["n"] += 1
+
+ def _flush_tail(clear=True):
+ if n_print is None or n_print == 0:
+ return
+
+ if omitted["n"] > 0:
+ print(f"... ({omitted['n']} lines omitted) ...")
+ for line in tail_buf:
+ print(line)
+
+ if clear:
+ tail_buf.clear()
+ omitted["n"] = 0
+
  def obj(x):
  import warnings

@@ -1503,7 +1538,10 @@ class SelfEnergy:
  try:
  cost, spectrum, model, alpha_select = _evaluate_cost(params)
  except RuntimeWarning as exc:
- raise ValueError(f"RuntimeWarning during cost eval: {exc}") from exc
+ raise ValueError(
+ f"RuntimeWarning during cost eval: {exc}"
+ ) from exc
+
  cost_f = float(cost)

  history.append(
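Catching `RuntimeWarning` as an exception, as in the hunk above, presupposes that warnings are escalated to errors around the evaluation; the exact filter used by the package is not shown in this diff, but the general idiom looks like the following standalone sketch:

```python
import warnings
import numpy as np

def guarded_log(x):
    """Evaluate np.log with RuntimeWarnings promoted to exceptions,
    then re-raised as ValueError (mirroring the pattern above)."""
    with warnings.catch_warnings():
        warnings.simplefilter("error", category=RuntimeWarning)
        try:
            return float(np.log(x))
        except RuntimeWarning as exc:
            raise ValueError(f"RuntimeWarning during cost eval: {exc}") from exc

print(guarded_log(2.0))        # 0.6931...
try:
    guarded_log(-1.0)          # invalid value -> RuntimeWarning -> ValueError
except ValueError as exc:
    print(exc)
```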
@@ -1517,14 +1555,7 @@ class SelfEnergy:
  }
  )

- last["cost"] = cost_f
- last["spectrum"] = spectrum
- last["model"] = model
- last["alpha"] = float(alpha_select)
-
- last_x["x"] = np.array(x, dtype=float, copy=True)
  last_cost["cost"] = cost_f
-
  if initial_cost["cost"] is None:
  initial_cost["cost"] = cost_f

@@ -1539,18 +1570,15 @@ class SelfEnergy:
  msg = [f"Iter {iter_counter['n']:4d} | cost = {cost: .4e}"]
  for key in sorted(params):
  msg.append(f"{key}={params[key]:.8g}")
- print(" | ".join(msg))
+ _emit_iter_line(" | ".join(msg))

  return cost_f
-
+
  class TerminationCallback:
- def __init__(self, tole, converge_iters,
- min_steps_for_regression):
+ def __init__(self, tole, converge_iters, min_steps_for_regression):
  self.tole = None if tole is None else float(tole)
  self.converge_iters = int(converge_iters)
- self.min_steps_for_regression = int(
- min_steps_for_regression
- )
+ self.min_steps_for_regression = int(min_steps_for_regression)
  self.iter_count = 0
  self.call_count = 0

@@ -1566,7 +1594,7 @@ class SelfEnergy:

  best_cost = float(best_global["cost"])
  if np.isfinite(best_cost):
- if abs(current - best_cost) < self.tole:
+ if abs(float(current) - best_cost) < self.tole:
  self.iter_count += 1
  else:
  self.iter_count = 0
@@ -1574,11 +1602,10 @@ class SelfEnergy:
  if self.iter_count >= self.converge_iters:
  raise ConvergenceException(
  "Converged: |cost-best| < "
- f"{self.tole:g} for "
- f"{self.converge_iters} iterations."
+ f"{self.tole:g} for {self.converge_iters} iterations."
  )

- if self.call_count < self.min_steps_for_regression:
+ if self.call_count < min_steps_for_regression:
  return

  init_cost = initial_cost["cost"]
@@ -1591,10 +1618,7 @@ class SelfEnergy:
  if not np.isfinite(best_cost):
  return

- if (
- abs(current - init_cost) * relative_best
- < abs(current - best_cost)
- ):
+ if abs(current - init_cost) * relative_best < abs(current - best_cost):
  raise RegressionException(
  "Regression toward initial guess detected."
  )
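The `ConvergenceException`/`RegressionException` pattern seen above stops `scipy.optimize.minimize` by raising from the callback and catching the exception around the call. A minimal self-contained sketch of the patience part of that idea (the function, tolerances, and names here are illustrative, not the package's defaults):

```python
import numpy as np
from scipy.optimize import minimize

class Converged(RuntimeError):
    """Raised from the callback to stop the optimiser early."""

def minimize_with_patience(fun, x0, tole=1e-10, converge_iters=5):
    """Nelder-Mead with a patience criterion: stop once consecutive
    evaluations stay within `tole` of the best cost seen so far."""
    best = {"cost": np.inf}
    stall = {"n": 0}

    def wrapped(x):
        c = float(fun(x))
        if np.isfinite(best["cost"]) and abs(c - best["cost"]) < tole:
            stall["n"] += 1
        else:
            stall["n"] = 0
        best["cost"] = min(best["cost"], c)
        return c

    def callback(xk):
        # Raising here propagates out of minimize and is caught below.
        if stall["n"] >= converge_iters:
            raise Converged(
                f"Converged: |cost-best| < {tole:g} for {converge_iters} iterations."
            )

    try:
        return minimize(wrapped, x0, method="Nelder-Mead", callback=callback)
    except Converged as exc:
        print(exc)
        return None

minimize_with_patience(lambda x: float(np.sum((x - 1.0) ** 2)), x0=np.zeros(3))
```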
@@ -1624,19 +1648,12 @@ class SelfEnergy:
  res = None

  while retry_count <= max_retries:
- best = {
- "x": None,
- "params": None,
- "cost": np.inf,
- "spectrum": None,
- "model": None,
- "alpha": None,
- }
- last_x["x"] = None
  last_cost["cost"] = None
  initial_cost["cost"] = None
  iter_counter["n"] = 0
  history.clear()
+ tail_buf.clear()
+ omitted["n"] = 0

  callback = TerminationCallback(
  tole=tole,
@@ -1660,6 +1677,7 @@ class SelfEnergy:
  break

  except RegressionException as exc:
+ _flush_tail()
  print(f"{exc} Rolling back {rollback_steps} steps.")
  retry_count += 1

@@ -1671,6 +1689,7 @@ class SelfEnergy:
  continue

  except ValueError as exc:
+ _flush_tail()
  print(f"ValueError encountered: {exc}. Rolling back.")
  retry_count += 1

@@ -1681,6 +1700,8 @@ class SelfEnergy:
  x0 = np.array(history[-back]["x"], dtype=float, copy=True)
  continue

+ _flush_tail()
+
  if retry_count > max_retries:
  print("Max retries reached. Parameters may not be optimal.")

@@ -1702,14 +1723,14 @@ class SelfEnergy:
  print("Optimised parameters:")
  print(args)

- # store inside class methods
  self._a2f_spectrum = spectrum
  self._a2f_model = model
  self._a2f_omega_range = omega_range
  self._a2f_alpha_select = alpha_select
  self._a2f_cost = cost

- return spectrum, model, omega_range, alpha_select, cost, params
+ return spectrum, model, omega_range, alpha_select, cost, params
+

  @staticmethod
  def _merge_defaults(defaults, override_dict=None, override_kwargs=None):
@@ -2147,6 +2168,101 @@ class SelfEnergy:
  return spectrum_out, alpha_select, fit_curve, guess_curve, chi2kink_result


+ @staticmethod
+ def _trimmed_stdout(print_lines):
+ """Optionally tee stdout+stderr and replace final output with head/tail."""
+ from contextlib import contextmanager
+
+ @contextmanager
+ def _ctx():
+ if print_lines is None:
+ yield
+ return
+
+ n = int(print_lines)
+ import sys
+ import io
+
+ class _Tee:
+ def __init__(self, out_stream, err_stream, nlines):
+ self.out_stream = out_stream
+ self.err_stream = err_stream
+ self.nlines = int(nlines)
+ self.buf = io.StringIO()
+
+ def write(self, text):
+ # Jupyter sometimes calls write with empty strings
+ if text is None:
+ return
+ if self.nlines > 0:
+ self.out_stream.write(text)
+ self.out_stream.flush()
+ self.buf.write(text)
+
+ def flush(self):
+ self.out_stream.flush()
+
+ def err_write(self, text):
+ if text is None:
+ return
+ if self.nlines > 0:
+ self.err_stream.write(text)
+ self.err_stream.flush()
+ self.buf.write(text)
+
+ def err_flush(self):
+ self.err_stream.flush()
+
+ def cleaned(self):
+ lines = self.buf.getvalue().splitlines()
+ if self.nlines <= 0:
+ return ""
+
+ if len(lines) <= 2 * self.nlines:
+ return "\n".join(lines)
+
+ head = lines[:self.nlines]
+ tail = lines[-self.nlines:]
+ omitted = len(lines) - 2 * self.nlines
+ mid = f"... ({omitted} lines omitted) ..."
+ return "\n".join(head + [mid] + tail)
+
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+
+ tee = _Tee(stdout_orig, stderr_orig, n)
+
+ class _ErrProxy:
+ def write(self, text):
+ tee.err_write(text)
+
+ def flush(self):
+ tee.err_flush()
+
+ try:
+ sys.stdout = tee
+ sys.stderr = _ErrProxy()
+ yield
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+
+ try:
+ from IPython.display import clear_output
+ clear_output(wait=True)
+ except Exception:
+ pass
+
+ cleaned = tee.cleaned()
+ if cleaned:
+ print(cleaned)
+ else:
+ # Avoid a "blank cell" surprise.
+ print("(output trimmed; nothing captured from stdout/stderr)")
+
+ return _ctx()
+
+
  @staticmethod
  def _el_el_self_energy(enel_range, k_BT, lambda_el, W, power):
  """Electron–electron contribution to the self-energy."""
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: xarpes
- Version: 0.6.3
+ Version: 0.6.4
  Summary: Extraction from angle resolved photoemission spectra
  Author: xARPES Developers
  Requires-Python: >=3.7.0
@@ -1,15 +1,15 @@
- xarpes/__init__.py,sha256=I3xq0d87dR2rGdO7N2rhLAcg1S_DDrPpArMy3AUTg70,756
+ xarpes/__init__.py,sha256=vXNdJ2VWNYVMLPB6MXlUskWnyEgsJOEsLMea57P7DXE,756
  xarpes/bandmap.py,sha256=1B5GbRXFdBPqnmeKPJW0mzlz-IUoLD-28rxrThpp4co,34664
  xarpes/constants.py,sha256=XOdgSzyrmHr5xocHZfmcFHHoVAa1G05a305hm3XOTtY,504
  xarpes/distributions.py,sha256=pC8V5MlZDNFdooMonFREEASiN5QodHiyKc2ehnxMKvQ,23498
  xarpes/functions.py,sha256=ibWoSa7_yXD9XsIEif0kbbbHidIozBivvaAEui9f64A,20675
  xarpes/mdcs.py,sha256=WRKSfGlRVKBssJp9FIHcAFsINVunPkmW9fBnFjqBHYI,42844
  xarpes/plotting.py,sha256=lGCReHcXhYLQXR5ns3EHFjCQjJ9Sc-HifV7n4BnWby4,5189
- xarpes/selfenergies.py,sha256=iP4WDPpcS5-3lyoREC0YaJZm3QXFdukvMD1_oH3I6mc,81141
+ xarpes/selfenergies.py,sha256=40mgFGZUWZIYywmyT1PTazP7elb4DKzcrrXzwtvkaMo,84534
  xarpes/settings_parameters.py,sha256=yOYvgEiDeDiLzzLkvysCTiVwqg6fKIkN48B-WSad728,1912
  xarpes/settings_plots.py,sha256=X-qteB2fIbBKOAcLMvMYDfQ8QdlUeA5xYQqF_Nyb4uA,1562
- xarpes-0.6.3.dist-info/entry_points.txt,sha256=917UR-cqFTMMI_vMqIbk7boYSuFX_zHwQlXKcj9vlCE,79
- xarpes-0.6.3.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
- xarpes-0.6.3.dist-info/WHEEL,sha256=jPMR_Dzkc4X4icQtmz81lnNY_kAsfog7ry7qoRvYLXw,81
- xarpes-0.6.3.dist-info/METADATA,sha256=zNYkNUXTutskjLjtaeM8BaXh7WcHc6jGE57UBEMhp1A,7154
- xarpes-0.6.3.dist-info/RECORD,,
+ xarpes-0.6.4.dist-info/entry_points.txt,sha256=917UR-cqFTMMI_vMqIbk7boYSuFX_zHwQlXKcj9vlCE,79
+ xarpes-0.6.4.dist-info/LICENSE,sha256=OXLcl0T2SZ8Pmy2_dmlvKuetivmyPd5m1q-Gyd-zaYY,35149
+ xarpes-0.6.4.dist-info/WHEEL,sha256=jPMR_Dzkc4X4icQtmz81lnNY_kAsfog7ry7qoRvYLXw,81
+ xarpes-0.6.4.dist-info/METADATA,sha256=erfAZH4Hn-z2-RiJThNjStBw_7UbFKGM9ExzxY_36GA,7154
+ xarpes-0.6.4.dist-info/RECORD,,