MatSciKit-COSMOTIM 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. MatSciKit_COSMOTIM/__init__.py +19 -0
  2. MatSciKit_COSMOTIM/constants.py +25 -0
  3. MatSciKit_COSMOTIM/fitting/__init__.py +13 -0
  4. MatSciKit_COSMOTIM/fitting/linear.py +67 -0
  5. MatSciKit_COSMOTIM/heat_capacity/__init__.py +15 -0
  6. MatSciKit_COSMOTIM/heat_capacity/debye.py +61 -0
  7. MatSciKit_COSMOTIM/heat_capacity/dulong_petit.py +38 -0
  8. MatSciKit_COSMOTIM/heat_capacity/low_t_fitting.py +154 -0
  9. MatSciKit_COSMOTIM/io/__init__.py +36 -0
  10. MatSciKit_COSMOTIM/io/dsc.py +59 -0
  11. MatSciKit_COSMOTIM/io/lfa.py +49 -0
  12. MatSciKit_COSMOTIM/io/lfa_excel.py +116 -0
  13. MatSciKit_COSMOTIM/io/ppms_hc.py +76 -0
  14. MatSciKit_COSMOTIM/io/ppms_tto.py +81 -0
  15. MatSciKit_COSMOTIM/io/readers.py +387 -0
  16. MatSciKit_COSMOTIM/py.typed +0 -0
  17. MatSciKit_COSMOTIM/structure/__init__.py +17 -0
  18. MatSciKit_COSMOTIM/structure/cif_reader.py +426 -0
  19. MatSciKit_COSMOTIM/structure/material.py +730 -0
  20. MatSciKit_COSMOTIM/structure/xrd_plot.py +184 -0
  21. MatSciKit_COSMOTIM/structure/xrd_reader.py +88 -0
  22. MatSciKit_COSMOTIM/thermal_conductivity/__init__.py +25 -0
  23. MatSciKit_COSMOTIM/thermal_conductivity/cahill.py +148 -0
  24. MatSciKit_COSMOTIM/thermal_conductivity/gruneisen.py +64 -0
  25. MatSciKit_COSMOTIM/thermal_conductivity/lfa_dsc.py +101 -0
  26. MatSciKit_COSMOTIM/thermal_conductivity/mean_free_path.py +99 -0
  27. MatSciKit_COSMOTIM/thermal_conductivity/pipeline.py +262 -0
  28. MatSciKit_COSMOTIM/thermal_conductivity/porosity_correction.py +57 -0
  29. MatSciKit_COSMOTIM/tui/__init__.py +3 -0
  30. MatSciKit_COSMOTIM/tui/__main__.py +8 -0
  31. MatSciKit_COSMOTIM/tui/app.py +879 -0
  32. MatSciKit_COSMOTIM/visualization/__init__.py +15 -0
  33. MatSciKit_COSMOTIM/visualization/inset_style.py +99 -0
  34. MatSciKit_COSMOTIM/visualization/journal_style.py +106 -0
  35. MatSciKit_COSMOTIM/visualization/thermal_conductivity.py +252 -0
  36. matscikit_cosmotim-0.2.2.dist-info/METADATA +299 -0
  37. matscikit_cosmotim-0.2.2.dist-info/RECORD +41 -0
  38. matscikit_cosmotim-0.2.2.dist-info/WHEEL +5 -0
  39. matscikit_cosmotim-0.2.2.dist-info/entry_points.txt +2 -0
  40. matscikit_cosmotim-0.2.2.dist-info/licenses/LICENSE +21 -0
  41. matscikit_cosmotim-0.2.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,19 @@
1
+ """
2
+ MatSciKit — Materials Science Toolkit.
3
+
4
+ A Python package for materials science research data processing,
5
+ with a focus on thermal transport analysis.
6
+
7
+ Subpackages
8
+ -----------
9
+ io : Instrument-specific data readers
10
+ structure : Crystallography and XRD analysis
11
+ heat_capacity : Heat capacity analysis (Pipeline 1)
12
+ thermal_conductivity : Thermal conductivity analysis (Pipeline 2)
13
+ fitting : Curve fitting utilities
14
+ visualization : Publication-quality figure export
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ __version__ = "0.2.0"
@@ -0,0 +1,25 @@
1
+ """
2
+ Physical constants in SI units.
3
+
4
+ Values are sourced from :mod:`scipy.constants` (CODATA 2018).
5
+ Aliases are provided for backward compatibility.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from scipy import constants as _c
11
+
12
+ # Boltzmann constant (J/K)
13
+ kb: float = _c.Boltzmann
14
+
15
+ # Planck constant (J·s)
16
+ h: float = _c.Planck
17
+
18
+ # Reduced Planck constant ħ (J·s)
19
+ hbar: float = _c.hbar
20
+
21
+ # Avogadro constant (mol⁻¹)
22
+ NA: float = _c.Avogadro
23
+
24
+ # Atomic mass unit (kg)
25
+ AMU_TO_KG: float = _c.atomic_mass
@@ -0,0 +1,13 @@
1
+ """
2
+ Curve fitting utilities.
3
+
4
+ Modules
5
+ -------
6
+ linear : Weighted linear regression with error propagation
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ from . import linear
12
+
13
+ __all__ = ["linear"]
@@ -0,0 +1,67 @@
1
+ """
2
+ Weighted linear regression with error propagation.
3
+
4
+ Translated from linear_fit_with_errors.m
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import numpy as np
10
+
11
+
12
+ def fit_with_errors(x: np.ndarray, y: np.ndarray, y_err: np.ndarray) -> tuple[float, float, float]:
13
+ """
14
+ Weighted linear fit to data with measurement errors.
15
+
16
+ Performs a weighted least-squares linear fit y = slope * (x - x0) + intercept,
17
+ where weights are 1/y_err^2.
18
+
19
+ Parameters
20
+ ----------
21
+ x : np.ndarray
22
+ Independent variable values.
23
+ y : np.ndarray
24
+ Dependent variable values.
25
+ y_err : np.ndarray
26
+ Standard errors on y values.
27
+
28
+ Returns
29
+ -------
30
+ slope : float
31
+ Fitted slope.
32
+ se_slope : float
33
+ Standard error of the slope.
34
+ intercept : float
35
+ Fitted intercept (at x = mean(x)).
36
+
37
+ Notes
38
+ -----
39
+ Translated from MATLAB ``linear_fit_with_errors.m``. The fit is performed
40
+ with x shifted by its mean (x0 = mean(x)) for numerical stability.
41
+ """
42
+ x = np.asarray(x, dtype=float)
43
+ y = np.asarray(y, dtype=float)
44
+ y_err = np.asarray(y_err, dtype=float)
45
+
46
+ n = len(x)
47
+ weights = 1.0 / y_err**2
48
+
49
+ x0 = np.mean(x)
50
+ dx = x - x0
51
+
52
+ sum_w = np.sum(weights)
53
+ sum_wx = np.sum(weights * dx)
54
+ sum_wv = np.sum(weights * y)
55
+ sum_wxx = np.sum(weights * dx**2)
56
+ sum_wvx = np.sum(weights * y * dx)
57
+
58
+ delta = sum_w * sum_wxx - sum_wx**2
59
+ slope = (sum_wvx * sum_w - sum_wx * sum_wv) / delta
60
+ intercept = (sum_wxx * sum_wv - sum_wx * sum_wvx) / delta
61
+
62
+ # Standard error of slope
63
+ residuals = y - slope * dx - intercept
64
+ sum_wrr = np.sum(weights * residuals**2)
65
+ se_slope = np.sqrt(sum_wrr * sum_w / delta / (n - 2))
66
+
67
+ return slope, se_slope, intercept
@@ -0,0 +1,15 @@
1
+ """
2
+ Heat capacity analysis (Pipeline 1).
3
+
4
+ Modules
5
+ -------
6
+ low_t_fitting : Debye temperature and sound velocity from low-T Cp fit
7
+ dulong_petit : Dulong-Petit limit calculation
8
+ debye : Debye temperature converters (velocity, modulus)
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from . import debye, dulong_petit, low_t_fitting
14
+
15
+ __all__ = ["debye", "dulong_petit", "low_t_fitting"]
@@ -0,0 +1,61 @@
1
+ """
2
+ Debye temperature converters.
3
+
4
+ Convert between Debye temperature, sound velocity, and elastic modulus.
5
+
6
+ Translated from modulus2debyeT.m and velocity2debyeT.m
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import numpy as np
12
+
13
+ from MatSciKit_COSMOTIM.constants import hbar, kb
14
+
15
+
16
def from_velocity(v_s: float, n_density: float) -> float:
    """
    Debye temperature from the average sound velocity.

    Parameters
    ----------
    v_s : float
        Sound velocity (m/s).
    n_density : float
        Atomic number density N/V (atoms/m³).

    Returns
    -------
    float
        Debye temperature θ_D (K), via θ_D = (ħ v_s / k_B) · (6π² n)^(1/3).
    """
    # Debye cutoff wavevector k_D = (6 π² n)^(1/3).
    k_debye = (6.0 * np.pi**2 * n_density) ** (1.0 / 3)
    return (hbar / kb) * v_s * k_debye
37
+
38
+
39
def from_modulus(modulus_gpa: float, density: float, n_density: float) -> float:
    """
    Debye temperature from a bulk modulus.

    The sound velocity is estimated as v = sqrt(B / ρ) and then converted
    to a Debye temperature.

    Parameters
    ----------
    modulus_gpa : float
        Bulk modulus (GPa).
    density : float
        Mass density (kg/m³).
    n_density : float
        Atomic number density N/V (atoms/m³).

    Returns
    -------
    float
        Debye temperature θ_D (K).
    """
    # GPa → Pa, then v = sqrt(B / ρ).
    velocity = np.sqrt(modulus_gpa * 1e9 / density)
    # θ_D = (ħ v / k_B) · (6π² n)^(1/3) — same conversion as ``from_velocity``.
    return velocity * (hbar / kb) * (6 * np.pi**2 * n_density) ** (1.0 / 3)
@@ -0,0 +1,38 @@
1
+ """
2
+ Dulong-Petit limit calculation.
3
+
4
+ The classical upper limit of heat capacity per unit mass.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from MatSciKit_COSMOTIM.constants import kb
10
+
11
+
12
def calculate(n_density: float, density: float) -> float:
    """
    Dulong-Petit high-temperature heat-capacity limit.

    Classically each atom contributes 3·k_B to the heat capacity, so per
    unit mass C_DP = 3 · n · k_B / ρ. The result is reported per gram to
    match the specific-heat units used elsewhere in the package.

    Parameters
    ----------
    n_density : float
        Atomic number density N/V (atoms/m³).
    density : float
        Mass density (kg/m³).

    Returns
    -------
    float
        Dulong-Petit limit in J/(g·K).
    """
    per_kilogram = 3 * n_density * kb / density  # J/(kg·K)
    return per_kilogram * 1e-3  # J/(kg·K) → J/(g·K)
@@ -0,0 +1,154 @@
1
+ """
2
+ Low-temperature heat capacity fitting for Debye temperature and sound velocity.
3
+
4
+ Fits Cp/T vs T² in the low-temperature regime to extract the Debye temperature
5
+ (θ_D) and average sound velocity (v_s).
6
+
7
+ Translated from LowT_Cp_fitting.m
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import numpy as np
13
+ from scipy.optimize import curve_fit
14
+
15
+ from MatSciKit_COSMOTIM.constants import hbar, kb
16
+
17
+
18
+ def _linear(x: np.ndarray, a: float, b: float) -> np.ndarray:
19
+ """Linear model: y = a*x + b."""
20
+ return a * x + b
21
+
22
+
23
+ def fit(
24
+ T: np.ndarray,
25
+ Cp: np.ndarray,
26
+ Cp_err: np.ndarray,
27
+ n_density: float,
28
+ density: float,
29
+ t_range: tuple[float, float] | None = None,
30
+ n_range: tuple[int, int] | None = None,
31
+ ) -> tuple[float, float, float, float]:
32
+ """
33
+ Extract Debye temperature and sound velocity from low-T Cp data.
34
+
35
+ Performs a weighted linear fit of Cp/T vs T² in the low-temperature
36
+ regime. The slope relates to the Debye temperature through:
37
+
38
+ Cp/T = β·T² + γ
39
+
40
+ where β = (12π⁴/5) · N_density · kb / (Density · θ_D³)
41
+
42
+ The fitting region can be specified in two ways:
43
+
44
+ - **Temperature range:** ``t_range=(T_min, T_max)`` selects all points
45
+ within that range.
46
+ - **Index range:** ``n_range=(n_start, n_end)`` uses data points at
47
+ indices n_start through n_end (1-based, inclusive) after sorting by T.
48
+ This matches the MATLAB convention ``data(n_start:n_end, :)``.
49
+
50
+ If both are given, ``t_range`` takes priority. If neither is given,
51
+ all data is used.
52
+
53
+ Parameters
54
+ ----------
55
+ T : np.ndarray
56
+ Temperature values (K).
57
+ Cp : np.ndarray
58
+ Heat capacity values (J/(g·K)).
59
+ Cp_err : np.ndarray
60
+ Heat capacity errors (J/(g·K)).
61
+ n_density : float
62
+ Number density N/V (atoms/m³).
63
+ density : float
64
+ Mass density (kg/m³).
65
+ t_range : tuple of (float, float), optional
66
+ Temperature range (T_min, T_max) in Kelvin for the fitting region.
67
+ n_range : tuple of (int, int), optional
68
+ Index range (n_start, n_end) using 1-based inclusive indexing,
69
+ applied after sorting by temperature. Matches MATLAB convention
70
+ ``data(n_start:n_end, :)``. For example, ``n_range=(13, 41)``
71
+ selects the 13th through 41st data points.
72
+
73
+ Returns
74
+ -------
75
+ theta_D : float
76
+ Debye temperature (K).
77
+ v_s : float
78
+ Average sound velocity (m/s).
79
+ theta_D_error : float
80
+ Uncertainty in Debye temperature (K).
81
+ v_s_error : float
82
+ Uncertainty in sound velocity (m/s).
83
+
84
+ Notes
85
+ -----
86
+ The fit is weighted by (Cp_err/Cp)^(-2), matching the MATLAB implementation
87
+ which uses relative errors as weights for the ``poly1`` fit.
88
+
89
+ Examples
90
+ --------
91
+ >>> # Method 1: Fit using temperature range
92
+ >>> theta_D, v_s, err_D, err_v = fit(T, Cp, Cp_err, n_density, density,
93
+ ... t_range=(3.0, 10.0))
94
+ >>> # Method 2: Use data points 13 through 41 (1-based, like MATLAB)
95
+ >>> theta_D, v_s, err_D, err_v = fit(T, Cp, Cp_err, n_density, density,
96
+ ... n_range=(13, 41))
97
+ """
98
+ T = np.asarray(T, dtype=float)
99
+ Cp = np.asarray(Cp, dtype=float)
100
+ Cp_err = np.asarray(Cp_err, dtype=float)
101
+
102
+ # Sort by temperature
103
+ sort_idx = np.argsort(T)
104
+ T = T[sort_idx]
105
+ Cp = Cp[sort_idx]
106
+ Cp_err = Cp_err[sort_idx]
107
+
108
+ # Select fitting region
109
+ if t_range is not None:
110
+ # Method 1: Temperature range
111
+ t_min, t_max = t_range
112
+ mask = (t_min <= T) & (t_max >= T)
113
+ T = T[mask]
114
+ Cp = Cp[mask]
115
+ Cp_err = Cp_err[mask]
116
+ elif n_range is not None:
117
+ # Method 2: Index range (1-based inclusive, like MATLAB)
118
+ n_start, n_end = n_range
119
+ # Convert 1-based inclusive to 0-based Python slice
120
+ T = T[n_start - 1 : n_end]
121
+ Cp = Cp[n_start - 1 : n_end]
122
+ Cp_err = Cp_err[n_start - 1 : n_end]
123
+
124
+ if len(T) < 3:
125
+ raise ValueError(
126
+ f"Fewer than 3 data points in the selected range. "
127
+ f"Need at least 3 for a meaningful linear fit. Got {len(T)}."
128
+ )
129
+
130
+ # Prepare data: Cp/T vs T²
131
+ x = T**2
132
+ y = Cp / T
133
+
134
+ # Weights: inverse of relative error squared, matching MATLAB W = (Cp_err./Cp).^(-2)
135
+ sigma = Cp_err / Cp # relative errors used as sigma for curve_fit
136
+
137
+ # Weighted linear fit
138
+ popt, pcov = curve_fit(_linear, x, y, sigma=sigma, absolute_sigma=False)
139
+ slope = popt[0]
140
+
141
+ # 95% confidence interval for slope error
142
+ slope_std = np.sqrt(pcov[0, 0])
143
+ slope_95 = slope_std * 1.96 # approximate 95% CI
144
+ r_error = slope_95 / abs(slope)
145
+
146
+ # Debye temperature: θ_D = (slope * Density * 1e3 / (12π⁴/5 * N_density * kb))^(-1/3)
147
+ theta_D = (1.0 / (12 * np.pi**4 / 5 * n_density * kb) * slope * density * 1e3) ** (-1.0 / 3)
148
+ theta_D_error = theta_D * r_error / 3
149
+
150
+ # Sound velocity: v_s = θ_D / (ħ/kb * (6·N_density·π²)^(1/3))
151
+ v_s = theta_D / (hbar / kb * (6 * n_density * np.pi**2) ** (1.0 / 3))
152
+ v_s_error = v_s * r_error / 3
153
+
154
+ return theta_D, v_s, theta_D_error, v_s_error
@@ -0,0 +1,36 @@
1
+ """
2
+ Instrument-specific data readers.
3
+
4
+ Function-based readers (quick use):
5
+ ``ppms_hc.read()``, ``ppms_tto.read()``, ``dsc.read()``, ``lfa.read()``
6
+
7
+ Class-based readers (metadata + caching):
8
+ ``PPMSHCReader``, ``PPMSTTOReader``, ``DSCReader``, ``LFAReader``,
9
+ ``auto_reader()``
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ from . import dsc, lfa, lfa_excel, ppms_hc, ppms_tto
15
+ from .readers import (
16
+ BaseReader,
17
+ DSCReader,
18
+ LFAReader,
19
+ PPMSHCReader,
20
+ PPMSTTOReader,
21
+ auto_reader,
22
+ )
23
+
24
+ __all__ = [
25
+ "BaseReader",
26
+ "DSCReader",
27
+ "LFAReader",
28
+ "PPMSHCReader",
29
+ "PPMSTTOReader",
30
+ "auto_reader",
31
+ "dsc",
32
+ "lfa",
33
+ "lfa_excel",
34
+ "ppms_hc",
35
+ "ppms_tto",
36
+ ]
@@ -0,0 +1,59 @@
1
+ """
2
+ DSC (Differential Scanning Calorimetry) data reader.
3
+
4
+ Reads heat capacity data from DSC CSV export files (e.g., Netzsch DSC 214).
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+
13
+
14
def read(filepath: str, skip_rows: int = 34) -> np.ndarray:
    """
    Read DSC heat-capacity data from a CSV export file.

    Expects header rows followed by data columns
    [Temperature (°C), Time (min), Cp (J/(g·K))]; the temperature column
    is converted from °C to K.

    Parameters
    ----------
    filepath : str
        Full path to the DSC CSV file.
    skip_rows : int, optional
        Number of header rows to skip. Default is 34 (Netzsch format).

    Returns
    -------
    data : np.ndarray
        Array with columns [Temperature (K), Cp (J/(g·K))].

    Raises
    ------
    FileNotFoundError
        If the specified file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        raise FileNotFoundError(f"File not found: {path}")

    # Netzsch exports may contain non-UTF-8 bytes (e.g. the µ in µV), so
    # decode with latin-1 (never fails) and hand the data lines to numpy.
    with open(path, encoding="latin-1") as handle:
        data_lines = handle.readlines()[skip_rows:]

    table = np.genfromtxt(data_lines, delimiter=",")
    if table.ndim == 1:
        # A single data row comes back flat; normalise to 2-D.
        table = table.reshape(1, -1)

    kelvin = table[:, 0] + 273.15  # column 0: temperature in °C
    heat_capacity = table[:, 2]    # column 2: Cp in J/(g·K)
    return np.column_stack([kelvin, heat_capacity])
@@ -0,0 +1,49 @@
1
+ """
2
+ LFA (Laser Flash Analysis) data reader.
3
+
4
+ Reads thermal diffusivity or conductivity data from LFA CSV files.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from pathlib import Path
10
+
11
+ import numpy as np
12
+
13
+
14
def read(filepath: str) -> np.ndarray:
    """
    Read LFA data from a CSV file.

    Expected columns: [Temperature (K), Value, Error]. Any row containing
    a NaN (e.g. from a blank field) is dropped.

    Parameters
    ----------
    filepath : str
        Full path to the LFA CSV file.

    Returns
    -------
    data : np.ndarray
        Array with columns [Temperature (K), Value, Error], NaN rows removed.

    Raises
    ------
    FileNotFoundError
        If the specified file does not exist.
    """
    path = Path(filepath)
    if not path.exists():
        raise FileNotFoundError(f"File not found: {path}")

    table = np.genfromtxt(path, delimiter=",")
    if table.ndim == 1:
        # A single data row comes back flat; normalise to 2-D.
        table = table.reshape(1, -1)

    # Keep only fully-numeric rows.
    return table[~np.isnan(table).any(axis=1)]
@@ -0,0 +1,116 @@
1
+ """
2
+ LFA Excel data reader.
3
+
4
+ Reads laser flash analysis diffusivity data from Excel files
5
+ (e.g., UT LFA instrument export format).
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from pathlib import Path
11
+
12
+ import numpy as np
13
+
14
+
15
def read(filepath: str, sheet_name: str | int = 0, skip_rows: int = 1) -> np.ndarray:
    """Read LFA diffusivity data from an Excel file.

    Expected columns after the header:
    [Temperature (°C), T_error, Diffusivity (mm²/s), Diff_error].
    Temperature is converted from °C to K; rows that fail numeric
    conversion or contain NaN are dropped.

    Parameters
    ----------
    filepath : str
        Path to the Excel file (.xlsx).
    sheet_name : str or int, optional
        Sheet name or index (default 0).
    skip_rows : int, optional
        Number of header rows to skip (default 1).

    Returns
    -------
    data : np.ndarray
        Array with columns [Temperature (K), Diffusivity (mm²/s), Diffusivity_error].

    Raises
    ------
    FileNotFoundError
        If the file does not exist.
    ImportError
        If openpyxl is not installed.
    """
    path = Path(filepath)
    if not path.exists():
        raise FileNotFoundError(f"File not found: {path}")

    try:
        import openpyxl
    except ImportError as exc:
        raise ImportError(
            "openpyxl is required to read Excel files. Install with: pip install openpyxl"
        ) from exc

    workbook = openpyxl.load_workbook(path, read_only=True, data_only=True)
    if isinstance(sheet_name, int):
        sheet = workbook.worksheets[sheet_name]
    else:
        sheet = workbook[sheet_name]

    parsed: list[list[float]] = []
    for index, row in enumerate(sheet.iter_rows(values_only=True)):
        # Skip the header region and blank rows (no value in column A).
        if index < skip_rows or row[0] is None:
            continue
        try:
            parsed.append([float(cell) if cell is not None else np.nan for cell in row[:4]])
        except (ValueError, TypeError):
            # Non-numeric row (e.g. stray text) — ignore it.
            continue

    workbook.close()

    if not parsed:
        raise ValueError(f"No numeric data found in sheet '{sheet_name}'")

    table = np.array(parsed)

    # Columns: Temperature (°C → K), Diffusivity, Diffusivity_error.
    result = np.column_stack([table[:, 0] + 273.15, table[:, 2], table[:, 3]])

    # Keep only fully-numeric rows.
    return result[~np.isnan(result).any(axis=1)]
91
+
92
+
93
def list_sheets(filepath: str) -> list[str]:
    """List available sheet names in an LFA Excel file.

    Parameters
    ----------
    filepath : str
        Path to the Excel file.

    Returns
    -------
    sheets : list of str
        Sheet names.

    Raises
    ------
    ImportError
        If openpyxl is not installed.
    """
    try:
        import openpyxl
    except ImportError as exc:
        raise ImportError(
            "openpyxl is required to read Excel files. Install with: pip install openpyxl"
        ) from exc

    workbook = openpyxl.load_workbook(filepath, read_only=True)
    try:
        return list(workbook.sheetnames)
    finally:
        # read_only workbooks keep the file handle open until closed.
        workbook.close()