pyTEMlib 0.2025.4.1__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pyTEMlib might be problematic.
- build/lib/pyTEMlib/__init__.py +33 -0
- build/lib/pyTEMlib/animation.py +640 -0
- build/lib/pyTEMlib/atom_tools.py +238 -0
- build/lib/pyTEMlib/config_dir.py +31 -0
- build/lib/pyTEMlib/crystal_tools.py +1219 -0
- build/lib/pyTEMlib/diffraction_plot.py +756 -0
- build/lib/pyTEMlib/dynamic_scattering.py +293 -0
- build/lib/pyTEMlib/eds_tools.py +826 -0
- build/lib/pyTEMlib/eds_xsections.py +432 -0
- build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
- build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
- build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- build/lib/pyTEMlib/file_reader.py +274 -0
- build/lib/pyTEMlib/file_tools.py +811 -0
- build/lib/pyTEMlib/get_bote_salvat.py +69 -0
- build/lib/pyTEMlib/graph_tools.py +1153 -0
- build/lib/pyTEMlib/graph_viz.py +599 -0
- build/lib/pyTEMlib/image/__init__.py +37 -0
- build/lib/pyTEMlib/image/image_atoms.py +270 -0
- build/lib/pyTEMlib/image/image_clean.py +197 -0
- build/lib/pyTEMlib/image/image_distortion.py +299 -0
- build/lib/pyTEMlib/image/image_fft.py +277 -0
- build/lib/pyTEMlib/image/image_graph.py +926 -0
- build/lib/pyTEMlib/image/image_registration.py +316 -0
- build/lib/pyTEMlib/image/image_utilities.py +309 -0
- build/lib/pyTEMlib/image/image_window.py +421 -0
- build/lib/pyTEMlib/image_tools.py +699 -0
- build/lib/pyTEMlib/interactive_image.py +1 -0
- build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
- build/lib/pyTEMlib/microscope.py +61 -0
- build/lib/pyTEMlib/probe_tools.py +906 -0
- build/lib/pyTEMlib/sidpy_tools.py +153 -0
- build/lib/pyTEMlib/simulation_tools.py +104 -0
- build/lib/pyTEMlib/test.py +437 -0
- build/lib/pyTEMlib/utilities.py +314 -0
- build/lib/pyTEMlib/version.py +5 -0
- build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
- pyTEMlib/__init__.py +25 -3
- pyTEMlib/animation.py +31 -22
- pyTEMlib/atom_tools.py +29 -34
- pyTEMlib/config_dir.py +2 -28
- pyTEMlib/crystal_tools.py +129 -165
- pyTEMlib/eds_tools.py +559 -342
- pyTEMlib/eds_xsections.py +432 -0
- pyTEMlib/eels_tools/__init__.py +44 -0
- pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- pyTEMlib/eels_tools/eels_database.py +134 -0
- pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- pyTEMlib/file_reader.py +274 -0
- pyTEMlib/file_tools.py +260 -1130
- pyTEMlib/get_bote_salvat.py +69 -0
- pyTEMlib/graph_tools.py +101 -174
- pyTEMlib/graph_viz.py +150 -0
- pyTEMlib/image/__init__.py +37 -0
- pyTEMlib/image/image_atoms.py +270 -0
- pyTEMlib/image/image_clean.py +197 -0
- pyTEMlib/image/image_distortion.py +299 -0
- pyTEMlib/image/image_fft.py +277 -0
- pyTEMlib/image/image_graph.py +926 -0
- pyTEMlib/image/image_registration.py +316 -0
- pyTEMlib/image/image_utilities.py +309 -0
- pyTEMlib/image/image_window.py +421 -0
- pyTEMlib/image_tools.py +154 -915
- pyTEMlib/kinematic_scattering.py +1 -1
- pyTEMlib/probe_tools.py +1 -1
- pyTEMlib/test.py +437 -0
- pyTEMlib/utilities.py +314 -0
- pyTEMlib/version.py +2 -3
- pyTEMlib/xrpa_x_sections.py +14 -10
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
- pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
- pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
- pyTEMlib/core_loss_widget.py +0 -721
- pyTEMlib/eels_dialog.py +0 -754
- pyTEMlib/eels_dialog_utilities.py +0 -1199
- pyTEMlib/eels_tools.py +0 -2359
- pyTEMlib/file_tools_qt.py +0 -193
- pyTEMlib/image_dialog.py +0 -158
- pyTEMlib/image_dlg.py +0 -146
- pyTEMlib/info_widget.py +0 -1086
- pyTEMlib/info_widget3.py +0 -1120
- pyTEMlib/low_loss_widget.py +0 -479
- pyTEMlib/peak_dialog.py +0 -1129
- pyTEMlib/peak_dlg.py +0 -286
- pytemlib-0.2025.4.1.dist-info/RECORD +0 -38
- pytemlib-0.2025.4.1.dist-info/top_level.txt +0 -1
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
- {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
pyTEMlib/eds_tools.py
CHANGED
@@ -10,51 +10,47 @@ Sources:
 
 Units:
 everything is in SI units, except length is given in nm and angles in mrad.
-
 Usage:
 See the notebooks for examples of these routines
 
 All the input and output is done through a dictionary which is to be found in the meta_data
 attribute of the sidpy.Dataset
 """
-import numpy as np
-
-import scipy
-from scipy.interpolate import interp1d, splrep  # splev, splint
-from scipy import interpolate
-from scipy.signal import peak_prominences
-from scipy.ndimage import gaussian_filter
-from sklearn.mixture import GaussianMixture
-from sklearn.cluster import KMeans
-import scipy.constants as const
-
-from scipy import constants
-import matplotlib.pyplot as plt
-# import matplotlib.patches as patches
 
-
-
-
+import os
+import csv
+import json
+import xml
 
-import
+import numpy as np
+import matplotlib.pyplot as plt
 
-
+import scipy
+import scipy.interpolate    # use interp1d,
+import scipy.optimize    # leastsq # least square fitting routine fo scipy
+import sklearn  # .mixture import GaussianMixture
 
 import sidpy
 
-import
-import pyTEMlib.
-from
+import pyTEMlib
+import pyTEMlib.file_reader
+from .utilities import elements
+from . eds_xsections import quantify_cross_section, quantification_k_factors
+from .config_dir import config_path
+elements_list = elements
 
-elements_list = pyTEMlib.eels_tools.elements
 
-
-
-
+def detector_response(dataset):
+    """
+    Calculate the detector response for the given dataset based on its metadata.
 
+    Parameters:
+    - dataset: A sidpy.Dataset object containing the spectral data and metadata.
 
-
-
+    Returns:
+    - A numpy array representing the detector efficiency across the energy scale.
+    """
+    tags = dataset.metadata['EDS']
 
     energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
     if 'start_channel' not in tags['detector']:
@@ -69,7 +65,8 @@ def detector_response(dataset):
 
 def get_detector_response(detector_definition, energy_scale):
     """
-    Calculates response of Si drift detector for EDS spectrum background based
+    Calculates response of Si drift detector for EDS spectrum background based
+    on detector parameters
 
     Parameters:
     ----------
@@ -91,19 +88,18 @@ def get_detector_response(detector_definition, energy_scale):
 
     tags['detector'] ={}
 
-    ## layer thicknesses of
-    tags['detector']['
-
-
-    tags['detector']['
-
-    tags['detector']['SiDeadThickness'] = .03 *1e-6  # in m
-
+    ## layer thicknesses of common materials in EDS detectors in m
+    tags['detector']['layers'] = {13: {'thickness':= 0.05*1e-6, 'Z': 13, 'element': 'Al'},
+                                  6: {'thickness':= 0.15*1e-6, 'Z': 6, 'element': 'C'}
+                                  }
+    tags['detector']['SiDeadThickness'] = .13 *1e-6  # in m
     tags['detector']['SiLiveThickness'] = 0.05  # in m
     tags['detector']['detector_area'] = 30 * 1e-6  #in m2
-    tags['detector']['
-
-
+    tags['detector']['energy_resolution'] = 125  # in eV
+    tags['detector']['start_energy'] = 120  # in eV
+    tags['detector']['start_channel'] = np.searchsorted(spectrum.energy_scale.values,120)
+
+    energy_scale = np.linspace(.01, 20, 1199)*1000  # i eV
     start = np.searchsorted(spectrum.energy, 100)
     energy_scale = spectrum.energy[start:]
     detector_Efficiency= pyTEMlib.eds_tools.detector_response(tags, spectrum.energy[start:])
@@ -111,8 +107,9 @@ def get_detector_response(detector_definition, energy_scale):
     p = np.array([1, 37, .3])/10000*3
     E_0= 200000
     background = np.zeros(len(spectrum))
-
-
+    bremsstrahlung = p[0] + p[1]*(E_0-energy_scale)/energy_scale
+    bremsstrahlung += p[2]*(E_0-energy_scale)**2/energy_scale
+    background[start:] = detector_Efficiency * bremsstrahlung
 
     plt.figure()
     plt.plot(spectrum.energy, spectrum, label = 'spec')
@@ -122,174 +119,223 @@ def get_detector_response(detector_definition, energy_scale):
     """
     response = np.ones(len(energy_scale))
    x_sections = pyTEMlib.eels_tools.get_x_sections()
-
-    def get_absorption(
-        photoabsorption = x_sections[str(
-        lin = interp1d(x_sections[str(
-        mu = lin(energy_scale) * x_sections[str(
+
+    def get_absorption(z, t):
+        photoabsorption = x_sections[str(z)]['dat']/1e10/x_sections[str(z)]['photoabs_to_sigma']
+        lin = scipy.interpolate.interp1d(x_sections[str(z)]['ene'], photoabsorption, kind='linear')
+        mu = lin(energy_scale) * x_sections[str(z)]['nominal_density']*100.  #1/cm -> 1/m
         return np.exp(-mu * t)
-
-
-
-
-    response *= get_absorption(5, detector_definition['detector']['Be_thickness'])
-    if 'Au_thickness' in detector_definition['detector']:
-        response *= get_absorption(79, detector_definition['detector']['Au_thickness'])
-    if 'Par_thickness' in detector_definition['detector']:
-        response *= get_absorption(6, detector_definition['detector']['Par_thickness'])
+
+    for layer in detector_definition['detector']['layers'].values():
+        if layer['Z'] != 14:
+            response *= get_absorption(layer['Z'], layer['thickness'])
     if 'SiDeadThickness' in detector_definition['detector']:
         response *= get_absorption(14, detector_definition['detector']['SiDeadThickness'])
-
     if 'SiLiveThickness' in detector_definition['detector']:
         response *= 1-get_absorption(14, detector_definition['detector']['SiLiveThickness'])
     return response
 
 
-def detect_peaks(dataset, minimum_number_of_peaks=30):
+def detect_peaks(dataset, minimum_number_of_peaks=30, prominence=10):
+    """
+    Detect peaks in the given spectral dataset.
+
+    Parameters:
+    - dataset: A sidpy.Dataset object containing the spectral data.
+    - minimum_number_of_peaks: The minimum number of peaks to detect.
+    - prominence: The prominence threshold for peak detection.
+
+    Returns:
+    - An array of indices representing the positions of detected peaks in the spectrum.
+    """
     if not isinstance(dataset, sidpy.Dataset):
         raise TypeError('Needs an sidpy dataset')
     if not dataset.data_type.name == 'SPECTRUM':
         raise TypeError('Need a spectrum')
 
-    energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
-    if '
-
-
-
-
-
-
-
+    energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
+    if 'EDS' not in dataset.metadata:
+        dataset.metadata['EDS'] = {}
+    if 'detector' not in dataset.metadata['EDS']:
+        raise ValueError('No detector information found, add detector dictionary to metadata')
+
+    if 'energy_resolution' not in dataset.metadata['EDS']['detector']:
+        dataset.metadata['EDS']['detector']['energy_resolution'] = 138
+        print('Using energy resolution of 138 eV')
+    if 'start_channel' not in dataset.metadata['EDS']['detector']:
+        dataset.metadata['EDS']['detector']['start_channel'] = np.searchsorted(energy_scale, 100)
+
+    resolution = dataset.metadata['EDS']['detector']['energy_resolution']
+    start = dataset.metadata['EDS']['detector']['start_channel']
     ## we use half the width of the resolution for smearing
     width = int(np.ceil(resolution/(energy_scale[1]-energy_scale[0])/2)+1)
-    new_spectrum = scipy.signal.savgol_filter(np.array(dataset)[start:], width, 2)
-
+    new_spectrum = scipy.signal.savgol_filter(np.array(dataset)[start:], width, 2)
+
     minor_peaks, _ = scipy.signal.find_peaks(new_spectrum, prominence=prominence)
-
+
     while len(minor_peaks) > minimum_number_of_peaks:
         prominence+=10
         minor_peaks, _ = scipy.signal.find_peaks(new_spectrum, prominence=prominence)
     return np.array(minor_peaks)+start
 
 def find_elements(spectrum, minor_peaks):
+    """
+    Identify elements present in the spectrum based on detected minor peaks.
+
+    Parameters:
+    - spectrum: A sidpy.Dataset object containing the spectral data.
+    - minor_peaks: An array of indices representing the positions of
+      minor peaks in the spectrum.
+
+    Returns:
+    - A list of element symbols identified in the spectrum.
+    """
     if not isinstance(spectrum, sidpy.Dataset):
         raise TypeError(' Need a sidpy dataset')
-    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
+    element_list = set()
+    peaks = minor_peaks[np.argsort(spectrum[minor_peaks])]
+    accounted_peaks = set()
+    for i, peak in reversed(list(enumerate(peaks))):
+        for z in range(5, 82):
+            if i in accounted_peaks:
+                continue
+            edge_info = pyTEMlib.eels_tools.get_x_sections(z)
+            # element = edge_info['name']
+            lines = edge_info.get('lines', {})
+            if abs(lines.get('K-L3', {}).get('position', 0) - energy_scale[peak]) <40:
+                element_list.add(edge_info['name'])
+                for key, line in lines.items():
+                    dist = np.abs(energy_scale[peaks]-line.get('position', 0))
+                    if key[0] == 'K' and np.min(dist)< 40:
+                        ind = np.argmin(dist)
+                        accounted_peaks.add(ind)
+            # This is a special case for boron and carbon
+            elif abs(lines.get('K-L2', {}).get('position', 0) - energy_scale[peak]) <30:
+                accounted_peaks.add(i)
+                element_list.add(edge_info['name'])
+
+            if abs(lines.get('L3-M5', {}).get('position', 0) - energy_scale[peak]) <50:
+                element_list.add(edge_info['name'])
+                for key, line in edge_info['lines'].items():
+                    dist = np.abs(energy_scale[peaks]-line.get('position', 0))
+                    if key[0] == 'L' and np.min(dist)< 40 and line['weight'] > 0.01:
+                        ind = np.argmin(dist)
+                        accounted_peaks.add(ind)
+    return list(element_list)
+
+
+def get_x_ray_lines(spectrum, element_list):
+    """
+    Analyze the given spectrum to identify and characterize the X-ray emission lines
+    associated with the specified elements.
+
+    Parameters:
+    - spectrum: A sidpy.Dataset object containing the spectral data.
+    - elements: A list of element symbols (e.g., ['Fe', 'Cu']) to look for in the spectrum.
+
+    Returns:
+    - A dictionary where each key is an element symbol and each value is another dictionary
+      containing information about the X-ray lines detected for that element.
+
+    alpha_k = 1e6
+    alpha_l = 6.5e7
+    alpha_m = 8*1e8  # 2.2e10
     # My Fit
     alpha_K = .9e6
-
-
+    alpha_l = 6.e7
+    alpha_m = 6*1e8  # 2.2e10
     # omega_K = Z**4/(alpha_K+Z**4)
-    # omega_L = Z**4/(
-    # omega_M = Z**4/(
-
-
+    # omega_L = Z**4/(alpha_l+Z**4)
+    # omega_M = Z**4/(alpha_m+Z**4)
+    """
+
+    out_tags = {}
+    x_sections = pyTEMlib.xrpa_x_sections.x_sections
+    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
+    for element in element_list:
         atomic_number = pyTEMlib.eds_tools.elements_list.index(element)
         out_tags[element] ={'Z': atomic_number}
-        lines = x_sections
-
-
-
-
-
-
-
-
-
-
+        lines = pyTEMlib.xrpa_x_sections.x_sections.get(str(atomic_number), {}).get('lines', {})
+        if not lines:
+            break
+        k_weight = 0
+        k_main = 'None'
+        k_lines = []
+        l_weight = 0
+        l_main = 'None'
+        l_lines = []
+        m_weight = 0
+        m_main = 'None'
+        m_lines = []
+
         for key, line in lines.items():
             if 'K' == key[0]:
                 if line['position'] < energy_scale[-1]:
-
-
-
-
+                    k_lines.append(key)
+                    if line['weight'] > k_weight:
+                        k_weight = line['weight']
+                        k_main = key
             if 'L' == key[0]:
                 if line['position'] < energy_scale[-1]:
-
-
-
-
+                    l_lines.append(key)
+                    if line['weight'] > l_weight:
+                        l_weight = line['weight']
+                        l_main = key
             if 'M' == key[0]:
                 if line['position'] < energy_scale[-1]:
-
-
-
-
-
-
-
-                    height = spectrum[np.searchsorted(energy_scale,
-            out_tags[element]['K-family']['height'] = height/
-            for key in
+                    m_lines .append(key)
+                    if line['weight'] > m_weight:
+                        m_weight = line['weight']
+                        m_main = key
+        if k_weight > 0:
+            out_tags[element]['K-family'] = {'main': k_main, 'weight': k_weight, 'lines': k_lines}
+            position = x_sections[str(atomic_number)]['lines'][k_main]['position']
+            height = spectrum[np.searchsorted(energy_scale, position)].compute()
+            out_tags[element]['K-family']['height'] = height/k_weight
+            for key in k_lines:
                 out_tags[element]['K-family'][key] = x_sections[str(atomic_number)]['lines'][key]
-        if
-            out_tags[element]['L-family'] = {'main':
-
-
-
+        if l_weight > 0:
+            out_tags[element]['L-family'] = {'main': l_main, 'weight': l_weight, 'lines': l_lines}
+            position = x_sections[str(atomic_number)]['lines'][l_main]['position']
+            height = spectrum[np.searchsorted(energy_scale, position)].compute()
+            out_tags[element]['L-family']['height'] = height/l_weight
+            for key in l_lines:
                 out_tags[element]['L-family'][key] = x_sections[str(atomic_number)]['lines'][key]
-        if
-            out_tags[element]['M-family'] = {'main':
-
-
-
+        if m_weight > 0:
+            out_tags[element]['M-family'] = {'main': m_main, 'weight': m_weight, 'lines': m_lines }
+            position = x_sections[str(atomic_number)]['lines'][m_main]['position']
+            height = spectrum[np.searchsorted(energy_scale, position)].compute()
+            out_tags[element]['M-family']['height'] = height/m_weight
+            for key in m_lines :
                 out_tags[element]['M-family'][key] = x_sections[str(atomic_number)]['lines'][key]
-
-        xs = get_eds_cross_sections(atomic_number)
-        if 'K' in xs and 'K-family' in out_tags[element]:
-            out_tags[element]['K-family']['probability'] = xs['K']
-        if 'L' in xs and 'L-family' in out_tags[element]:
-            out_tags[element]['L-family']['probability'] = xs['L']
-        if 'M' in xs and 'M-family' in out_tags[element]:
-            out_tags[element]['M-family']['probability'] = xs['M']
-
-    if 'EDS' not in spectrum.metadata:
-        spectrum.metadata['EDS'] = {}
-    spectrum.metadata['EDS'].update(out_tags)
+    spectrum.metadata.setdefault('EDS', {}).update(out_tags)
     return out_tags
 
 
-def
-
+def get_fwhm(energy: float, energy_ref: float, fwhm_ref: float) -> float:
+    """ Calculate FWHM of Gaussians"""
+    return np.sqrt(2.5*(energy-energy_ref)+fwhm_ref**2)
+
+
+def gaussian(energy_scale: np.ndarray, mu: float, fwhm: float) -> np.ndarray:
+    """ Gaussian function"""
+    sig = fwhm/2/np.sqrt(2*np.log(2))
+    return np.exp(-np.power(np.array(energy_scale) - mu, 2.) / (2 * np.power(sig, 2.)))
 
-def gaussian(enrgy_scale, mu, FWHM):
-    sig = FWHM/2/np.sqrt(2*np.log(2))
-    return np.exp(-np.power(np.array(enrgy_scale) - mu, 2.) / (2 * np.power(sig, 2.)))
 
-def get_peak(
-
-
-
-
+def get_peak(energy: float, energy_scale: np.ndarray,
+             energy_ref: float = 5895.0, fwhm_ref: float = 136) -> np.ndarray:
+    """ Generate a normalized Gaussian peak for a given energy."""
+    # all energies in eV
+    fwhm = get_fwhm(energy, energy_ref, fwhm_ref)
+    gauss = gaussian(energy_scale, energy, fwhm)
 
-    return
+    return gauss /(gauss.sum()+1e-12)
 
 
 def initial_model_parameter(spectrum):
+    """ Initialize model parameters based on the spectrum's metadata."""
     tags = spectrum.metadata['EDS']
     energy_scale = spectrum.get_spectral_dims(return_axis=True)[0]
     p = []
@@ -302,7 +348,7 @@ def initial_model_parameter(spectrum):
             if line[0] == 'K':
                 model += get_peak(info['position'], energy_scale)*info['weight']
         lines['K-family']['peaks'] = model /model.sum()  # *lines['K-family']['probability']
-
+
         p.append(lines['K-family']['height'] / lines['K-family']['peaks'].max())
         peaks.append(lines['K-family']['peaks'])
         keys.append(element+':K-family')
@@ -325,90 +371,109 @@ def initial_model_parameter(spectrum):
         peaks.append(lines['M-family']['peaks'])
         keys.append(element+':M-family')
 
-
-    #p.extend([300, 10, 1.e-04])
-    # p.extend([1, 300, -.02])
     p.extend([1e7, 1e-3, 1500, 20])
     return np.array(peaks), np.array(p), keys
 
-def get_model(spectrum
-
+def get_model(spectrum):
+    """
+    Construct the model spectrum from the metadata in the given spectrum object.
+
+    Parameters:
+    - spectrum: The spectrum object containing metadata and spectral data.
+
+    Returns:
+    - model: The constructed model spectrum as a numpy array.
+    """
+    model = np.zeros(len(np.array(spectrum)))
     for key in spectrum.metadata['EDS']:
-
-
-
-
-
+        if isinstance(spectrum.metadata['EDS'][key], dict) and key in elements_list:
+            for family in spectrum.metadata['EDS'][key]:
+                if '-family' in family:
+                    intensity = spectrum.metadata['EDS'][key][family].get('areal_density', 0)
+                    peaks = spectrum.metadata['EDS'][key][family].get('peaks', np.zeros(len(model)))
+                    if peaks.sum() <0.1:
+                        print('no intensity',key, family)
+                    model += peaks * intensity
 
     if 'detector_efficiency' in spectrum.metadata['EDS']['detector'].keys():
         detector_efficiency = spectrum.metadata['EDS']['detector']['detector_efficiency']
     else:
         detector_efficiency = None
-
-
-
-    # model[start:] += detector_efficiency[start:] * (pp[-3] + pp[-2] * (E_0 - energy_scale) / energy_scale +
-    #                                                pp[-1] * (E_0 - energy_scale) ** 2 / energy_scale)
+    e_0 = spectrum.metadata['experiment']['acceleration_voltage']
+    pp = spectrum.metadata['EDS']['bremsstrahlung']
+    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
 
+    if detector_efficiency is not None:
+        bremsstrahlung = (pp[-3] + pp[-2] * (e_0 - energy_scale) / energy_scale +
+                          pp[-1] * (e_0 - energy_scale) ** 2 / energy_scale)
+        model += bremsstrahlung
+        model *= detector_efficiency
     return model
 
-def fit_model(spectrum,
-
-    peaks
+def fit_model(spectrum, use_detector_efficiency=False):
+    """
+    Fit the EDS spectrum using a model composed of elemental peaks and bremsstrahlung background.
+
+    Parameters:
+    - spectrum: The EDS spectrum to fit.
+    - elements: List of elements to consider in the fit.
+    - use_detector_efficiency: Whether to include detector efficiency in the model.
+
+    Returns:
+    - peaks: The fitted peak shapes.
+    - p: The fitted parameters.
+    """
+    peaks, pin, _ = initial_model_parameter(spectrum)
+
     energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
-
+
     if 'detector' in spectrum.metadata['EDS'].keys():
-
-
-        if
-
-
+        start = spectrum.metadata['EDS'].get('detector', {}).get('start_channel', 120)
+        spectrum.metadata['EDS']['detector']['start_channel'] = np.searchsorted(energy_scale, start)
+        if use_detector_efficiency:
+            efficiency = spectrum.metadata['EDS']['detector'].get('detector_efficiency', [])
+            if not isinstance(efficiency, (list, np.ndarray)):
+                if len(efficiency) != len(spectrum):
+                    efficiency = detector_response(spectrum)
             else:
                 use_detector_efficiency = False
     else:
         print('need detector information to fit spectrum')
-        return
-    start = spectrum.metadata['EDS']['detector']['start_channel']
-    energy_scale = energy_scale[start:]
+        return None, None
 
-
+    e_0 = spectrum.metadata.get('experiment', {}).get('acceleration_voltage', 0.)
 
     def residuals(pp, yy):
-
+        """ residuals for fit"""
         model = np.zeros(len(yy))
         for i in range(len(pp)-4):
             model += peaks[i]*pp[i]
-        # pp[-3:] = np.abs(pp[-3:])
-
         if use_detector_efficiency:
-            bremsstrahlung = pp[-
-
-            model
-
-
-
-        err = np.abs((yy - model)[start:])  # /np.sqrt(np.abs(yy[start:])+1e-12)
-
+            bremsstrahlung = (pp[-3] + pp[-2] * (e_0 - energy_scale) / energy_scale +
+                              pp[-1] * (e_0 - energy_scale)**2 / energy_scale)
+            model += bremsstrahlung
+            model *= efficiency
+        err = np.abs(yy - model)  # /np.sqrt(np.abs(yy[start:])+1e-12)
         return err
 
     y = np.array(spectrum)  # .compute()
-    [p, _] = leastsq(residuals, pin, args=(y))
-
-    # print(pin[-6:], p[-6:])
-
-    update_fit_values(out_tags, peaks, p)
-
-
-    if 'EDS' not in spectrum.metadata:
-        spectrum.metadata['EDS'] = {}
-    spectrum.metadata['EDS'].update(out_tags)
+    [p, _] = scipy.optimize.leastsq(residuals, pin, args=(y,), maxfev=10000)
 
+    update_fit_values(spectrum.metadata['EDS'], peaks, p)
     return np.array(peaks), np.array(p)
 
 
 def update_fit_values(out_tags, peaks, p):
+    """
+    Update the out_tags dictionary with the fitted peak shapes and parameters.
+
+    Parameters:
+    - out_tags: Dictionary containing the initial tags for each element and line family.
+    - peaks: Array of fitted peak shapes.
+    - p: Array of fitted parameters.
+    """
     index = 0
-    for
+    for lines in out_tags.values():
         if 'K-family' in lines:
             lines['K-family']['areal_density'] = p[index]
             lines['K-family']['peaks'] = peaks[index]
@@ -421,129 +486,51 @@ def update_fit_values(out_tags, peaks, p):
             lines['M-family']['areal_density'] =p[index]
             lines['M-family']['peaks'] = peaks[index]
             index += 1
-
-def get_eds_xsection(Xsection, energy_scale, start_bgd, end_bgd):
-    background = pyTEMlib.eels_tools.power_law_background(Xsection, energy_scale, [start_bgd, end_bgd], verbose=False)
-    cross_section_core = Xsection- background[0]
-    cross_section_core[cross_section_core < 0] = 0.0
-    cross_section_core[energy_scale < end_bgd] = 0.0
-    return cross_section_core
+    out_tags['bremsstrahlung'] = p[-4:]
 
 
-def
-
-
-    edge_info = pyTEMlib.eels_tools.get_x_sections(z)
-
-
-    eds_cross_sections = {}
-    Xyield = edge_info['total_fluorescent_yield']
-    if 'K' in Xyield:
-        start_bgd = edge_info['K1']['onset'] * 0.8
-        end_bgd = edge_info['K1']['onset'] - 5
-        if start_bgd > end_bgd:
-            start_bgd = end_bgd-100
-        if start_bgd > energy_scale[0] and end_bgd< energy_scale[-1]-100:
-            eds_xsection = get_eds_xsection(Xsection, energy_scale, start_bgd, end_bgd)
-            eds_xsection[eds_xsection<0] = 0.
-            start_sum = np.searchsorted(energy_scale, edge_info['K1']['onset'])
-            end_sum = start_sum+600
-            if end_sum> len(Xsection):
-                end_sum = len(Xsection)-1
-            eds_cross_sections['K1'] = eds_xsection[start_sum:end_sum].sum()
-            eds_cross_sections['K'] = eds_xsection[start_sum:end_sum].sum() * Xyield['K']
-
-    if 'L3' in Xyield:
-        start_bgd = edge_info['L3']['onset'] * 0.8
-        end_bgd = edge_info['L3']['onset'] - 5
-        if start_bgd > end_bgd:
-            start_bgd = end_bgd-100
-        if start_bgd > energy_scale[0] and end_bgd< energy_scale[-1]-100:
-            eds_xsection = get_eds_xsection(Xsection, energy_scale, start_bgd, end_bgd)
-            eds_xsection[eds_xsection<0] = 0.
-            start_sum = np.searchsorted(energy_scale, edge_info['L3']['onset'])
-            end_sum = start_sum+600
-            if end_sum> len(Xsection):
-                end_sum = len(Xsection)-1
-            if end_sum >np.searchsorted(energy_scale, edge_info['K1']['onset'])-10:
-                end_sum = np.searchsorted(energy_scale, edge_info['K1']['onset'])-10
-            eds_cross_sections['L'] = eds_xsection[start_sum:end_sum].sum()
-            L1_channel = np.searchsorted(energy_scale, edge_info['L1']['onset'])
-            m_start = start_sum-100
-            if m_start < 2:
-                m_start = start_sum-20
-            l3_rise = np.max(Xsection[m_start: L1_channel-10])-np.min(Xsection[m_start: L1_channel-10])
-            l1_rise = np.max(Xsection[L1_channel-10: L1_channel+100])-np.min(Xsection[L1_channel-10: L1_channel+100])
-            l1_ratio = l1_rise/l3_rise
-
-            eds_cross_sections['L1'] = l1_ratio * eds_cross_sections['L']
-            eds_cross_sections['L2'] = eds_cross_sections['L']*(1-l1_ratio)*1/3
-            eds_cross_sections['L3'] = eds_cross_sections['L']*(1-l1_ratio)*2/3
-            eds_cross_sections['yield_L1'] = Xyield['L1']
-            eds_cross_sections['yield_L2'] = Xyield['L2']
-            eds_cross_sections['yield_L3'] = Xyield['L3']
-
-            eds_cross_sections['L'] = eds_cross_sections['L1']*Xyield['L1']+eds_cross_sections['L2']*Xyield['L2']+eds_cross_sections['L3']*Xyield['L3']
-            # eds_cross_sections['L'] /= 8
-    if 'M5' in Xyield:
-        start_bgd = edge_info['M5']['onset'] * 0.8
-        end_bgd = edge_info['M5']['onset'] - 5
-        if start_bgd > end_bgd:
-            start_bgd = end_bgd-100
-        if start_bgd > energy_scale[0] and end_bgd< energy_scale[-1]-100:
-            eds_xsection = get_eds_xsection(Xsection, energy_scale, start_bgd, end_bgd)
-            eds_xsection[eds_xsection<0] = 0.
-            start_sum = np.searchsorted(energy_scale, edge_info['M5']['onset'])
-            end_sum = start_sum+600
-            if end_sum > np.searchsorted(energy_scale, edge_info['L3']['onset'])-10:
-                end_sum = np.searchsorted(energy_scale, edge_info['L3']['onset'])-10
-            eds_cross_sections['M'] = eds_xsection[start_sum:end_sum].sum()
-            #print(edge_info['M5']['onset'] - edge_info['M1']['onset'])
-            M3_channel = np.searchsorted(energy_scale, edge_info['M3']['onset'])
-            M1_channel = np.searchsorted(energy_scale, edge_info['M1']['onset'])
-            m5_rise = np.max(Xsection[start_sum-100: M3_channel-10])-np.min(Xsection[start_sum-100: M3_channel-10])
-            m3_rise = np.max(Xsection[M3_channel-10: M1_channel-10])-np.min(Xsection[M3_channel-10: M1_channel-10])
-            m1_rise = np.max(Xsection[M1_channel-10: M1_channel+100])-np.min(Xsection[M1_channel-10: M1_channel+100])
-            m1_ratio = m1_rise/m5_rise
-            m3_ratio = m3_rise/m5_rise
-            m5_ratio = 1-(m1_ratio+m3_ratio)
-            #print(m1_ratio, m3_ratio, 1-(m1_ratio+m3_ratio))
-            eds_cross_sections['M1'] = m1_ratio * eds_cross_sections['M']
-            eds_cross_sections['M2'] = m3_ratio * eds_cross_sections['M']*1/3
-            eds_cross_sections['M3'] = m3_ratio * eds_cross_sections['M']*2/3
-            eds_cross_sections['M4'] = m5_ratio * eds_cross_sections['M']*2/5
-            eds_cross_sections['M5'] = m5_ratio * eds_cross_sections['M']*3/5
-            eds_cross_sections['yield_M1'] = Xyield['M1']
-            eds_cross_sections['yield_M2'] = Xyield['M2']
-            eds_cross_sections['yield_M3'] = Xyield['M3']
-            eds_cross_sections['yield_M4'] = Xyield['M4']
-            eds_cross_sections['yield_M5'] = Xyield['M5']
-            eds_cross_sections['M'] = eds_cross_sections['M1']*Xyield['M1']+eds_cross_sections['M2']*Xyield['M2']+eds_cross_sections['M3']*Xyield['M3'] \
-                                      +eds_cross_sections['M4']*Xyield['M4']+eds_cross_sections['M5']*Xyield['M5']
-            #eds_cross_sections['M'] /= 18
-    return eds_cross_sections
+def get_phases(dataset, mode='kmeans', number_of_phases=4):
+    """
+    Perform phase segmentation on the dataset using the specified clustering mode.
 
+    Parameters:
+    - dataset: The dataset to be segmented.
+    - mode: The clustering mode to use ('kmeans' or other).
+    - number_of_phases: The number of phases (clusters) to identify.
 
-
-
-
+    Returns:
+    None. The results are stored in the dataset's metadata.
+    """
+    x_vec = np.array(dataset).reshape(dataset.shape[0]*dataset.shape[1], dataset.shape[2])
+    x_vec = np.divide(x_vec.T, x_vec.sum(axis=1)).T
     if mode != 'kmeans':
-
+        #choose number of components
+        gmm = sklearn.mixture.GaussianMixture(n_components=number_of_phases, covariance_type="full")
 
-        gmm_results = gmm.fit(np.array(
-        gmm_labels = gmm_results.fit_predict(
+        gmm_results = gmm.fit(np.array(x_vec))  #we can intelligently fold the data and perform GM
+        gmm_labels = gmm_results.fit_predict(x_vec)
 
-        dataset.metadata['gaussian_mixing_model'] = {'map': gmm_labels.reshape(dataset.shape[0],
+        dataset.metadata['gaussian_mixing_model'] = {'map': gmm_labels.reshape(dataset.shape[0],
+                                                                               dataset.shape[1]),
                                                      'covariances': gmm.covariances_,
                                                      'weights': gmm.weights_,
                                                      'means': gmm_results.means_}
     else:
-        km = KMeans(number_of_phases, n_init =10)  #choose number of clusters
-        km_results = km.fit(np.array(
-        dataset.metadata['kmeans'] = {'map': km_results.labels_.reshape(dataset.shape[0],
+        km = sklearn.cluster.KMeans(number_of_phases, n_init =10)  #choose number of clusters
+        km_results = km.fit(np.array(x_vec))  #we can intelligently fold the data and perform Kmeans
+        dataset.metadata['kmeans'] = {'map': km_results.labels_.reshape(dataset.shape[0],
+                                                                        dataset.shape[1]),
                                       'means': km_results.cluster_centers_}
 
 def plot_phases(dataset, image=None, survey_image=None):
+    """
+    Plot the phase maps and corresponding spectra from the dataset.
+
+    Parameters:
+    - dataset: The dataset containing phase information.
+    - image: Optional. The image to overlay the phase map on.
+    - survey_image: Optional. A survey image to display alongside the phase maps.
+    """
     if survey_image is not None:
         ncols = 3
     else:
@@ -553,19 +540,17 @@ def plot_phases(dataset, image=None, survey_image=None):
     if survey_image is not None:
         im = axes[0].imshow(survey_image.T)
         axis_index += 1
-
-
-
-
-
-    phase_spectra = dataset.metadata['kmeans']['means']
-    map = dataset.metadata['kmeans']['map']
+
+    if 'kmeans' not in dataset.metadata:
+        raise ValueError('No phase information found, run get_phases first')
+    phase_spectra = dataset.metadata['kmeans']['means']
+    map_data = dataset.metadata['kmeans']['map']
 
     cmap = plt.get_cmap('jet', len(phase_spectra))
     im = axes[axis_index].imshow(image.T,cmap='gray')
-    im = axes[axis_index].imshow(
-                                 vmax=np.max(
-
+    im = axes[axis_index].imshow(map_data.T, cmap=cmap,vmin=np.min(map_data) - 0.5,
+                                 vmax=np.max(map_data) + 0.5,alpha=0.2)
+
     cbar = fig.colorbar(im, ax=axes[axis_index])
     cbar.ax.set_yticks(np.arange(0, len(phase_spectra) ))
     cbar.ax.set_ylabel("GMM Phase", fontsize = 14)
@@ -580,30 +565,262 @@ def plot_phases(dataset, image=None, survey_image=None):
 
 
 def plot_lines(eds_quantification: dict, axis: plt.Axes):
+    """
+    Plot EDS line strengths on the given matplotlib axis.
+
+    Parameters:
+    - eds_quantification: A dictionary containing EDS line data.
+    - axis: A matplotlib Axes object where the lines will be plotted.
+    """
+    colors = plt.get_cmap('Dark2').colors  # jet(np.linspace(0, 1, 10))
+
+    index = 0
     for key, lines in eds_quantification.items():
+        color = colors[index % len(colors)]
         if 'K-family' in lines:
             intensity = lines['K-family']['height']
             for line in lines['K-family']:
                 if line[0] == 'K':
                     pos = lines['K-family'][line]['position']
-                    axis.plot([pos,pos], [0, intensity*lines['K-family'][line]['weight']],
-
-
+                    axis.plot([pos,pos], [0, intensity*lines['K-family'][line]['weight']],
+                              color=color)
+                    if line == lines['K-family']['main']:
+                        axis.text(pos,0, key+'\n'+line, verticalalignment='top', color=color)
 
         if 'L-family' in lines:
             intensity = lines['L-family']['height']
             for line in lines['L-family']:
                 if line[0] == 'L':
                     pos = lines['L-family'][line]['position']
-                    axis.plot([pos,pos], [0, intensity*lines['L-family'][line]['weight']],
-
-
+                    axis.plot([pos,pos], [0, intensity*lines['L-family'][line]['weight']],
+                              color=color)
+                    if line in [lines['L-family']['main'], 'L3-M5', 'L3-N5', 'L1-M3']:
+                        axis.text(pos,0, key+'\n'+line, verticalalignment='top', color=color)
 
         if 'M-family' in lines:
             intensity = lines['M-family']['height']
             for line in lines['M-family']:
                 if line[0] == 'M':
                     pos = lines['M-family'][line]['position']
-                    axis.plot([pos,pos],
-
-
+                    axis.plot([pos,pos],
+                              [0, intensity*lines['M-family'][line]['weight']],
+                              color=color)
+                    if line in [lines['M-family']['main'], 'M5-N7', 'M4-N6']:
+                        axis.text(pos,0, key+'\n'+line, verticalalignment='top', color=color)
+
+        index +=1
+        index = index % 10
+
+
+def get_eds_xsection(x_section, energy_scale, start_bgd, end_bgd):
+    """
+    Calculate the EDS cross-section by subtracting the background and zeroing out
+    values outside the specified energy range.
+    The processed cross-section data with background removed
+    and values outside the energy range set to zero.
+
+    Parameters:
+    - x_section: The raw cross-section data.
+    - energy_scale: The energy scale corresponding to the cross-section data.
+    - start_bgd: The start energy for background calculation.
+    - end_bgd: The end energy for background calculation.
+
+    Returns:
+    - cross_section_core: np.array
+    """
+    background = pyTEMlib.eels_tools.power_law_background(x_section, energy_scale,
+                                                          [start_bgd, end_bgd], verbose=False)
+    cross_section_core = x_section- background[0]
+    cross_section_core[cross_section_core < 0] = 0.0
+    cross_section_core[energy_scale < end_bgd] = 0.0
+    return cross_section_core
+
+
+def add_k_factors(element_dict, element, k_factors):
+    """Add k-factors to element dictionary."""
+    family = element_dict.get('K-family', {})
+    line = k_factors.get(element, {}).get('Ka1', False)
+    if not line:
+        line = k_factors.get(element, {}).get('Ka2', False)
+    if line and family:
+        family['k_factor'] = float(line)
+
+    family = element_dict.get('L-family', False)
+    line = k_factors.get(element, {}).get('La1', False)
+    if line and family:
+        family['k_factor'] = float(line)
+
+    family = element_dict.get('M-family', False)
+    line = k_factors.get(element, {}).get('Ma1', False)
+    if line and family:
+        family['k_factor'] = float(line)
+
+
+def quantify_EDS(spectrum, quantification_dict=None, mask=['Cu'] ):
+    """Calculate quantification for EDS spectrum with either k-factors or cross sections."""
+
+    for key in spectrum.metadata['EDS']:
+        element = 0
+        if isinstance(spectrum.metadata['EDS'][key], dict) and key in elements_list:
+            element = spectrum.metadata['EDS'][key].get('Z', 0)
+        if element < 1:
+            continue
+        if quantification_dict is None:
+            quantification_dict = {}
+
+        edge_info = pyTEMlib.eels_tools.get_x_sections(element)
+        spectrum.metadata['EDS'][key]['atomic_weight'] = edge_info['atomic_weight']
+        spectrum.metadata['EDS'][key]['nominal_density'] = edge_info['nominal_density']
+
+        for family, item in edge_info['fluorescent_yield'].items():
+            spectrum.metadata['EDS'][key][f"{family}-family"
+                                          ]['fluorescent_yield'] = item
+        if quantification_dict.get('metadata', {}).get('type', '') == 'k_factor':
+            k_factors = quantification_dict.get('table', {})
+            add_k_factors(spectrum.metadata['EDS'][key], key, k_factors)
+
+    if quantification_dict is None:
+        print('using cross sections for quantification')
+        quantify_cross_section(spectrum, None, mask=mask)
+    elif not isinstance(quantification_dict, dict):
+        pass
+    elif quantification_dict.get('metadata', {}).get('type', '') == 'k_factor':
+        print('using k-factors for quantification')
+        quantification_k_factors(spectrum, mask=mask)  # , quantification_dict['table'],
+    elif quantification_dict.get('metadata', {}).get('type', '') == 'cross_section':
+        print('using cross sections for quantification')
+        quantify_cross_section(spectrum, quantification_dict['table'], mask=mask)
+    else:
+        print('using cross sections for quantification')
+        quantify_cross_section(spectrum, None, mask=mask)
+        # print('Need either k-factor or cross section dictionary')
+
+
+def read_esl_k_factors(filename, reduced=False):
+    """ Read k-factors from esl file."""
+    k_factors = {}
+    if not os.path.isfile(filename):
+        print('k-factor file not found', filename)
+        return None, 'k_factors_Bruker_15keV.json'
+    tree = xml.etree.ElementTree.parse(filename)
+    root = tree.getroot()
+    k_dict = pyTEMlib.file_reader.etree_to_dict(root)
+    k_dict = k_dict.get('TRTStandardLibrary', {})
+    k_factor_dict = (k_dict.get('ClassInstance', {}).get('CliffLorimerFactors', {}))
+    for index, item in enumerate(k_factor_dict.get('K_Factors', '').split(',')):
+        if index < 84:
+            if item.strip() != '0':
+                k_factors[elements[index]] = {'Ka1': float(item)}
+            else:
+                k_factors[elements[index]] = {}
+    for index, item in enumerate(k_factor_dict.get('L_Factors', '').split(',')):
+        if index < 84:
+            if item.strip() != '0':
+                k_factors[elements[index]]['La1'] = float(item)
+    for index, item in enumerate(k_factor_dict.get('M_Factors', '').split(',')):
+        if index < 84:
+            if item.strip() != '0':
+                k_factors[elements[index]]['Ma1'] = float(item)
+    primary = int(float(k_dict.get('ClassInstance', {}).get('Header', {}).get('PrimaryEnergy', 0)))
+    name = f'k_factors_Bruker_{primary}keV.json'
+    metadata = {'origin': 'pyTEMlib',
+                'source_file': filename,
+                'reduced': reduced,
+                'version': pyTEMlib.__version__,
+                'type': 'k-factors',
+                'spectroscopy': 'EDS',
+                'acceleration_voltage': primary,
+                'microscope': 'Bruker',
+                'name': name}
+    return k_factors, metadata
+
+
+def read_csv_k_factors(filename, reduced=True):
+    """ Read k-factors from csv file of ThermoFisher TEMs."""
+    k_factors = {}
+    with open(filename, newline='', encoding='utf-8') as csvfile:
+        reader = csv.reader(csvfile, delimiter=',', quotechar='|')
+        start = True
+        for row in reader:
+            if start:
+                k_column = row.index('K-factor')
+                start = False
+            else:
+                element, line = row[0].split('-')
+                if element not in k_factors:
+                    k_factors[element] = {}
+                if reduced:
+                    if line[-1:] == '1':
+                        k_factors[element][line] = row[k_column]
+                else:
+                    k_factors[element][line] = row[k_column]
+    metadata = {'origin': 'pyTEMlib',
+                'source_file': filename,
+                'reduced': reduced,
+                'microscope': 'ThermoFisher',
+                'acceleration_voltage': 200000,
+                'version': pyTEMlib.__version__,
+                'type': 'k-factors',
+                'spectroscopy': 'EDS',
+                'name': 'k_factors_Thermo_200keV.json'}
+    return k_factors, metadata
+
+
+def convert_k_factor_file(file_name, reduced=True, new_name=None):
+    """ Convert k-factor file to a dictionary."""
+    if not os.path.isfile(file_name):
+        print('k-factor file not found', file_name)
+        return None
+    _, filename = os.path.split(file_name)
+    _, extension = os.path.splitext(filename)
+    if extension == '.csv':
+        k_factors, metadata = read_csv_k_factors(file_name, reduced=reduced)
+    elif extension == '.esl':
+        k_factors, metadata = read_esl_k_factors(file_name)
+    else:
+        print('unknown k-factor file format', extension)
+        return None
+    if new_name is None:
+        new_name = metadata['name']
+    write_k_factors(k_factors, metadata, file_name=new_name)
+    return k_factors, metadata
+
+
+def get_k_factor_files():
+    """ Get list of k-factor files in the .pyTEMlib folder."""
+    k_factor_files = []
+    for file_name in os.listdir(config_path):
+        if 'k_factors' in file_name:
+            k_factor_files.append(file_name)
+    return k_factor_files
+
+
+def write_k_factors(k_factors, metadata, file_name='k_factors.json'):
+    """ Write k-factors to a json file."""
+    file_name = os.path.join(config_path, file_name)
+    save_dict = {"table" : k_factors, "metadata" : metadata}
+    with open(file_name, "w", encoding='utf-8') as json_file:
+        json.dump(save_dict, json_file, indent=4, encoding='utf-8')
+
+
+def read_k_factors(file_name='k_factors.json'):
+    """ Read k-factors from a json file."""
+    if not os.path.isfile(os.path.join(config_path, file_name)):
+        print('k-factor file not found', file_name)
+        return None
+    with open(os.path.join(config_path, file_name), 'r', encoding='utf-8') as json_file:
+        table, metadata = json.load(json_file)
+    return table, metadata
+
+
+def load_k_factors(reduced=True):
+    """ Load k-factors from csv files in the .pyTEMlib folder."""
+    k_factors = {}
+    metadata = {}
+    data_path = os.path.join(os.path.expanduser('~'), '.pyTEMlib')
+    for file_name in os.listdir(data_path):
+        if 'k-factors' in file_name:
+            path = os.path.join(data_path, file_name)
+            k_factors, metadata = read_csv_k_factors(path, reduced=reduced)
+            metadata['type'] = 'k_factor'
+    return {'table': k_factors, 'metadata': metadata}