pyTEMlib 0.2025.4.1__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyTEMlib might be problematic.

Files changed (94)
  1. build/lib/pyTEMlib/__init__.py +33 -0
  2. build/lib/pyTEMlib/animation.py +640 -0
  3. build/lib/pyTEMlib/atom_tools.py +238 -0
  4. build/lib/pyTEMlib/config_dir.py +31 -0
  5. build/lib/pyTEMlib/crystal_tools.py +1219 -0
  6. build/lib/pyTEMlib/diffraction_plot.py +756 -0
  7. build/lib/pyTEMlib/dynamic_scattering.py +293 -0
  8. build/lib/pyTEMlib/eds_tools.py +826 -0
  9. build/lib/pyTEMlib/eds_xsections.py +432 -0
  10. build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
  11. build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  12. build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
  13. build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  14. build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  15. build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  16. build/lib/pyTEMlib/file_reader.py +274 -0
  17. build/lib/pyTEMlib/file_tools.py +811 -0
  18. build/lib/pyTEMlib/get_bote_salvat.py +69 -0
  19. build/lib/pyTEMlib/graph_tools.py +1153 -0
  20. build/lib/pyTEMlib/graph_viz.py +599 -0
  21. build/lib/pyTEMlib/image/__init__.py +37 -0
  22. build/lib/pyTEMlib/image/image_atoms.py +270 -0
  23. build/lib/pyTEMlib/image/image_clean.py +197 -0
  24. build/lib/pyTEMlib/image/image_distortion.py +299 -0
  25. build/lib/pyTEMlib/image/image_fft.py +277 -0
  26. build/lib/pyTEMlib/image/image_graph.py +926 -0
  27. build/lib/pyTEMlib/image/image_registration.py +316 -0
  28. build/lib/pyTEMlib/image/image_utilities.py +309 -0
  29. build/lib/pyTEMlib/image/image_window.py +421 -0
  30. build/lib/pyTEMlib/image_tools.py +699 -0
  31. build/lib/pyTEMlib/interactive_image.py +1 -0
  32. build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
  33. build/lib/pyTEMlib/microscope.py +61 -0
  34. build/lib/pyTEMlib/probe_tools.py +906 -0
  35. build/lib/pyTEMlib/sidpy_tools.py +153 -0
  36. build/lib/pyTEMlib/simulation_tools.py +104 -0
  37. build/lib/pyTEMlib/test.py +437 -0
  38. build/lib/pyTEMlib/utilities.py +314 -0
  39. build/lib/pyTEMlib/version.py +5 -0
  40. build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
  41. pyTEMlib/__init__.py +25 -3
  42. pyTEMlib/animation.py +31 -22
  43. pyTEMlib/atom_tools.py +29 -34
  44. pyTEMlib/config_dir.py +2 -28
  45. pyTEMlib/crystal_tools.py +129 -165
  46. pyTEMlib/eds_tools.py +559 -342
  47. pyTEMlib/eds_xsections.py +432 -0
  48. pyTEMlib/eels_tools/__init__.py +44 -0
  49. pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  50. pyTEMlib/eels_tools/eels_database.py +134 -0
  51. pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  52. pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  53. pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  54. pyTEMlib/file_reader.py +274 -0
  55. pyTEMlib/file_tools.py +260 -1130
  56. pyTEMlib/get_bote_salvat.py +69 -0
  57. pyTEMlib/graph_tools.py +101 -174
  58. pyTEMlib/graph_viz.py +150 -0
  59. pyTEMlib/image/__init__.py +37 -0
  60. pyTEMlib/image/image_atoms.py +270 -0
  61. pyTEMlib/image/image_clean.py +197 -0
  62. pyTEMlib/image/image_distortion.py +299 -0
  63. pyTEMlib/image/image_fft.py +277 -0
  64. pyTEMlib/image/image_graph.py +926 -0
  65. pyTEMlib/image/image_registration.py +316 -0
  66. pyTEMlib/image/image_utilities.py +309 -0
  67. pyTEMlib/image/image_window.py +421 -0
  68. pyTEMlib/image_tools.py +154 -915
  69. pyTEMlib/kinematic_scattering.py +1 -1
  70. pyTEMlib/probe_tools.py +1 -1
  71. pyTEMlib/test.py +437 -0
  72. pyTEMlib/utilities.py +314 -0
  73. pyTEMlib/version.py +2 -3
  74. pyTEMlib/xrpa_x_sections.py +14 -10
  75. {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
  76. pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
  77. {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
  78. pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
  79. pyTEMlib/core_loss_widget.py +0 -721
  80. pyTEMlib/eels_dialog.py +0 -754
  81. pyTEMlib/eels_dialog_utilities.py +0 -1199
  82. pyTEMlib/eels_tools.py +0 -2359
  83. pyTEMlib/file_tools_qt.py +0 -193
  84. pyTEMlib/image_dialog.py +0 -158
  85. pyTEMlib/image_dlg.py +0 -146
  86. pyTEMlib/info_widget.py +0 -1086
  87. pyTEMlib/info_widget3.py +0 -1120
  88. pyTEMlib/low_loss_widget.py +0 -479
  89. pyTEMlib/peak_dialog.py +0 -1129
  90. pyTEMlib/peak_dlg.py +0 -286
  91. pytemlib-0.2025.4.1.dist-info/RECORD +0 -38
  92. pytemlib-0.2025.4.1.dist-info/top_level.txt +0 -1
  93. {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
  94. {pytemlib-0.2025.4.1.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
pyTEMlib/eels_tools.py DELETED
@@ -1,2359 +0,0 @@
- """
- eels_tools
- Model based quantification of electron energy-loss data
- Copyright by Gerd Duscher
-
- The University of Tennessee, Knoxville
- Department of Materials Science & Engineering
-
- Sources:
- M. Tian et al.
-
- Units:
- everything is in SI units, except length is given in nm and angles in mrad.
-
- Usage:
- See the notebooks for examples of these routines
-
- All the input and output is done through a dictionary which is to be found in the meta_data
- attribute of the sidpy.Dataset
-
- Update by Austin Houston, UTK 12-2023: Parallelization of spectrum images
- """
- import typing
- from typing import Union
- import numpy as np
- import matplotlib.pyplot as plt
-
- import scipy
- from scipy import constants
- from scipy import interpolate
- from scipy.interpolate import interp1d, splrep
- from scipy.signal import peak_prominences
- from scipy.ndimage import gaussian_filter
- from scipy.optimize import curve_fit, leastsq
-
- from numba import jit, float64
-
- import requests
-
- # ## And we use the image tool library of pyTEMlib
- from pyTEMlib.xrpa_x_sections import x_sections
-
- import sidpy
- from sidpy.proc.fitter import SidFitter
-
- # we have a function called find peaks - is it necessary?
- # or could we just use scipy.signal import find_peaks
-
- major_edges = ['K1', 'L3', 'M5', 'N5']
- all_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',
-              'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']
- first_close_edges = ['K1', 'L3', 'M5', 'M3', 'N5', 'N3']
-
- elements = [' ', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
-             'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V',
-             'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
-             'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag',
-             'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr',
-             'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
-             'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']
-
-
- # kroeger_core(e_data,a_data,eps_data,ee,thick, relativistic =True)
- # kroeger_core2(e_data,a_data,eps_data,acceleration_voltage_kev,thickness, relativistic =True)
- # get_wave_length(e0)
-
- # plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef = 4., ep= 16.8, Es = 0, IBT = [])
- # drude(tags, e, ep, ew, tnm, eb)
- # drude(ep, eb, gamma, e)
- # drude_lorentz(epsInf,leng, ep, eb, gamma, e, Amplitude)
- # zl_func( p, x)
- # ###############################################################
- # Utility Functions
- # ################################################################
-
- def get_wave_length(e0):
-     """get deBroglie wavelength of electron accelerated by energy (in eV) e0"""
-
-     ev = constants.e * e0
-     return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
-
-
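Note: get_wave_length evaluates the relativistic de Broglie relation lambda = h / sqrt(2 m_e eV (1 + eV / (2 m_e c^2))). A minimal standalone sketch of the same calculation (values illustrative; at 200 kV the wavelength comes out near 2.5 pm):

    import numpy as np
    from scipy import constants

    def de_broglie_wavelength(e0):
        # relativistic de Broglie wavelength of an electron accelerated through e0 volts
        ev = constants.e * e0  # kinetic energy in joules
        return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))

    print(de_broglie_wavelength(200000.) * 1e12)  # approx. 2.51 (pm)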
- def effective_collection_angle(energy_scale, alpha, beta, beam_kv):
-     """Calculates the effective collection angle in mrad:
-
-     Translated from the original Fortran program.
-     Parameter
-     ---------
-     energy_scale: numpy array
-         first and last energy loss of spectrum in eV
-     alpha: float
-         convergence angle in mrad
-     beta: float
-         collection angle in mrad
-     beam_kv: float
-         acceleration voltage in V
-
-     Returns
-     -------
-     eff_beta: float
-         effective collection angle in mrad
-
-     # function y = effbeta(ene, alpha, beta, beam_kv)
-     #
-     # This program computes etha(alpha,beta), that is the collection
-     # efficiency associated to the following geometry :
-     #
-     # alpha = half angle of illumination (0 -> pi/2)
-     # beta = half angle of collection (0 -> pi/2)
-     # (pi/2 = 1570.795 mrad)
-     #
-     # A constant angular distribution of incident electrons is assumed
-     # for any incident angle (-alpha,alpha). These electrons impinge on the
-     # target and a single energy-loss event occurs, with a characteristic
-     # angle theta-e (relativistic). The angular distribution of the
-     # electrons after the target is analytically derived.
-     # This program integrates this distribution from theta=0 up to
-     # theta=beta with an adjustable angular step.
-     # This program also computes beta* which is the theoretical
-     # collection angle which would give the same value of etha(alpha,beta)
-     # with a parallel incident beam.
-     #
-     # subroutines and function subprograms required
-     # ---------------------------------------------
-     # none
-     #
-     # comments
-     # --------
-     #
-     # The following parameters are asked as input :
-     # accelerating voltage (kV), energy loss range (eV) for the study,
-     # energy loss step (eV) in this range, alpha (mrad), beta (mrad).
-     # The program returns for each energy loss step :
-     # alpha (mrad), beta (mrad), theta-e (relativistic) (mrad),
-     # energy loss (eV), etha (#), beta * (mrad)
-     #
-     # author :
-     # --------
-     # Pierre TREBBIA
-     # US 41 : "Microscopie Electronique Analytique Quantitative"
-     # Laboratoire de Physique des Solides, Bat. 510
-     # Universite Paris-Sud, F91405 ORSAY Cedex
-     # Phone : (33-1) 69 41 53 68
-     #
-     """
-     if beam_kv == 0:
-         beam_kv = 100.0
-
-     if alpha == 0:
-         return beta
-
-     if beta == 0:
-         return alpha
-
-     z1 = beam_kv  # eV
-     z2 = energy_scale[0]
-     z3 = energy_scale[-1]
-     z4 = 100.0
-
-     z5 = alpha * 0.001  # rad
-     z6 = beta * 0.001  # rad
-     z7 = 500.0  # number of integration steps to be modified at will
-
-     # main loop on energy loss
-     #
-     for zx in range(int(z2), int(z3), int(z4)):  # ! zx = current energy loss
-         eta = 0.0
-         x0 = float(zx) * (z1 + 511060.) / (z1 * (z1 + 1022120.))  # x0 = relativistic theta-e
-         x1 = np.pi / (2. * x0)
-         x2 = x0 * x0 + z5 * z5
-         x3 = z5 / x0 * z5 / x0
-         x4 = 0.1 * np.sqrt(x2)
-         dtheta = (z6 - x4) / z7
-         #
-         # calculation of the analytical expression
-         #
-         for zi in range(1, int(z7)):
-             theta = x4 + dtheta * float(zi)
-             x5 = theta * theta
-             x6 = 4. * x5 * x0 * x0
-             x7 = x2 - x5
-             x8 = np.sqrt(x7 * x7 + x6)
-             x9 = (x8 + x7) / (2. * x0 * x0)
-             x10 = 2. * theta * dtheta * np.log(x9)
-             eta = eta + x10
-
-         eta = eta + x2 / 100. * np.log(1. + x3)  # addition of the central contribution
-         x4 = z5 * z5 * np.log(1. + x1 * x1)  # normalisation
-         eta = eta / x4
-         #
-         # correction by geometrical factor (beta/alpha)**2
-         #
-         if z6 < z5:
-             x5 = z5 / z6
-             eta = eta * x5 * x5
-
-         etha2 = eta * 100.
-         #
-         # calculation of beta *
-         #
-         x6 = np.power((1. + x1 * x1), eta)
-         x7 = x0 * np.sqrt(x6 - 1.)
-         beta = x7 * 1000.  # in mrad
-
-     return beta
-
-
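A hedged usage sketch for effective_collection_angle (energy scale, angles and voltage below are illustrative; the routine expects both angles in mrad and the beam energy in volts, and returns the effective collection angle in mrad):

    import numpy as np

    energy_scale = np.linspace(100., 1100., 2048)  # eV; avoid 0 eV, where theta-e vanishes
    eff_beta = effective_collection_angle(energy_scale, alpha=10., beta=30., beam_kv=200000.)
    print(eff_beta)  # effective collection angle in mrad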
- def set_default_metadata(current_dataset: sidpy.Dataset) -> None:
-
-     if 'experiment' not in current_dataset.metadata:
-         current_dataset.metadata['experiment'] = {}
-     if 'convergence_angle' not in current_dataset.metadata['experiment']:
-         current_dataset.metadata['experiment']['convergence_angle'] = 30
-     if 'collection_angle' not in current_dataset.metadata['experiment']:
-         current_dataset.metadata['experiment']['collection_angle'] = 50
-     if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
-         current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
-
- ###
-
- # ###############################################################
- # Peak Fit Functions
- # ################################################################
-
-
- def residuals_smooth(p, x, y, only_positive_intensity):
-     """part of fit"""
-
-     err = (y - model_smooth(x, p, only_positive_intensity))
-     return err
-
-
- def model_smooth(x, p, only_positive_intensity=False):
-     """part of fit"""
-
-     y = np.zeros(len(x))
-
-     number_of_peaks = int(len(p) / 3)
-     for i in range(number_of_peaks):
-         if only_positive_intensity:
-             p[i * 3 + 1] = abs(p[i * 3 + 1])
-         p[i * 3 + 2] = abs(p[i * 3 + 2])
-         if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
-             p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2.  # ## width cannot extend beyond zero, maximum is FWTM/2
-
-         y = y + gauss(x, p[i * 3:])
-
-     return y
-
- @jit
- def gauss(x, p):  # p[0]==mean, p[1]= amplitude p[2]==fwhm,
-     """Gaussian Function
-
-     p[0]==mean, p[1]= amplitude p[2]==fwhm
-     area = np.sqrt(2* np.pi)* p[1] * np.abs(p[2] / 2.3548)
-     FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
-     sigma = FWHM/2.3548
-     """
-     if p[2] == 0:
-         return x * 0.
-     else:
-         return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
-
-
- def lorentz(x, center, amplitude, width):
-     """ Lorentzian Function """
-     lorentz_peak = 0.5 * width / np.pi / ((x - center) ** 2 + (width / 2) ** 2)
-     return amplitude * lorentz_peak / lorentz_peak.max()
-
- def zero_loss_function(x, p):
-     return zl_func(x, *p)
-
- def zl_func(x, center1, amplitude1, width1, center2, amplitude2, width2):
-     """ zero loss function as product of two lorentzians """
-     return lorentz(x, center1, amplitude1, width1) * lorentz(x, center2, amplitude2, width2)
-
-
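The 2.3548 used by gauss above is 2*sqrt(2*ln 2), the FWHM-to-sigma conversion of a Gaussian; the two-Lorentzian product zl_func models the zero-loss peak. A quick standalone check (parameters are illustrative):

    import numpy as np

    fwhm = 0.5                                     # eV
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))    # = fwhm / 2.3548
    area = np.sqrt(2 * np.pi) * 1.0 * sigma        # amplitude 1.0, as in the gauss() docstring

    x = np.linspace(-2., 2., 401)
    zlp = zl_func(x, 0., 1., 0.3, 0., 1., 0.6)     # two Lorentzians sharing one center
    print(area, zlp.max())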
- def zl(x, p, p_zl):
-     """zero-loss function"""
-     p_zl_local = p_zl.copy()
-     p_zl_local[2] += p[0]
-     p_zl_local[5] += p[0]
-     zero_loss = zl_func(x, p_zl_local)
-     return p[1] * zero_loss / zero_loss.max()
-
-
- def get_channel_zero(spectrum: np.ndarray, energy: np.ndarray, width: int = 8):
-     """Determine the shift of the energy scale according to the zero-loss peak position
-
-     This function assumes that the zero loss peak is the maximum of the spectrum.
-     """
-
-     zero = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
-     width = int(width/2)
-     x = np.array(energy[int(zero-width):int(zero+width)])
-     y = np.array(spectrum[int(zero-width):int(zero+width)]).copy()
-
-     y[np.nonzero(y <= 0)] = 1e-12
-
-     p0 = [energy[zero], spectrum.max(), .5]  # Initial guess is a normal distribution
-
-     def errfunc(pp, xx, yy):
-         return (gauss(xx, pp) - yy) / np.sqrt(yy)  # Distance to the target function
-
-     [p1, _] = leastsq(errfunc, np.array(p0[:]), args=(x, y))
-     fit_mu, area, fwhm = p1
-
-     return fwhm, fit_mu
-
-
- def get_zero_loss_energy(dataset):
-
-     spectrum = dataset.sum(axis=tuple(range(dataset.ndim - 1)))
-
-     startx = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
-
-     end = startx + 3
-     start = startx - 3
-     for i in range(10):
-         if spectrum[startx - i] < 0.3 * spectrum[startx]:
-             start = startx - i
-         if spectrum[startx + i] < 0.3 * spectrum[startx]:
-             end = startx + i
-     if end - start < 7:
-         end = startx + 4
-         start = startx - 4
-     width = int((end-start)/2+0.5)
-
-     energy = dataset.get_spectral_dims(return_axis=True)[0].values
-
-     if dataset.ndim == 1:  # single spectrum
-         _, shifts = get_channel_zero(np.array(dataset), energy, width)
-         shifts = np.array([shifts])
-     elif dataset.ndim == 2:  # line scan
-         shifts = np.zeros(dataset.shape[:1])
-         for x in range(dataset.shape[0]):
-             _, shifts[x] = get_channel_zero(dataset[x, :], energy, width)
-     elif dataset.ndim == 3:  # spectral image
-         shifts = np.zeros(dataset.shape[:2])
-         for x in range(dataset.shape[0]):
-             for y in range(dataset.shape[1]):
-                 _, shifts[x, y] = get_channel_zero(dataset[x, y, :], energy, width)
-     return shifts
-
-
- def shift_energy(dataset: sidpy.Dataset, shifts: np.ndarray) -> sidpy.Dataset:
-     """ Align zero-loss peaks of any spectral sidpy dataset """
-
-     new_si = dataset.copy()
-     new_si *= 0.0
-
-     image_dims = dataset.get_image_dims()
-     if len(image_dims) == 0:
-         image_dims = [0]
-     if len(image_dims) != shifts.ndim:
-         raise TypeError('array of energy shifts has to have the same dimension as the dataset')
-     if not isinstance(dataset, sidpy.Dataset):
-         raise TypeError('This function needs a sidpy Dataset to shift energy scale')
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
-     if dataset.ndim == 1:  # single spectrum
-         tck = interpolate.splrep(np.array(energy_scale - shifts), np.array(dataset), k=1, s=0)
-         new_si[:] = interpolate.splev(energy_scale, tck, der=0)
-         new_si.data_type = 'Spectrum'
-     elif dataset.ndim == 2:  # line scan
-         for x in range(dataset.shape[0]):
-             tck = interpolate.splrep(np.array(energy_scale - shifts[x]), np.array(dataset[x, :]), k=1, s=0)
-             new_si[x, :] = interpolate.splev(energy_scale, tck, der=0)
-     elif dataset.ndim == 3:  # spectral image
-         for x in range(dataset.shape[0]):
-             for y in range(dataset.shape[1]):
-                 tck = interpolate.splrep(np.array(energy_scale - shifts[x, y]), np.array(dataset[x, y]), k=1, s=0)
-                 new_si[x, y, :] = interpolate.splev(energy_scale, tck, der=0)
-
-     return new_si
-
-
- def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
-     """
-     Shifts the energy axis of the input dataset to be aligned with the zero-loss peak.
-
-     Parameters:
-     -----------
-     dataset : sidpy.Dataset
-         The input dataset containing the energy axis to be aligned.
-
-     Returns:
-     --------
-     sidpy.Dataset
-         The dataset with the energy axis shifted to align the zero-loss peak.
-
-     """
-     shifts = get_zero_loss_energy(dataset)
-     # print(shifts, dataset)
-     new_si = shift_energy(dataset, shifts)
-     new_si.metadata.update({'zero_loss': {'shifted': shifts}})
-     return new_si
-
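get_zero_loss_energy, shift_energy and align_zero_loss form the zero-loss alignment pipeline: measure a per-pixel shift from a Gaussian fit around the spectrum maximum, then re-interpolate every spectrum onto the common energy axis. A usage sketch (low_loss is a hypothetical sidpy.Dataset containing a zero-loss peak):

    aligned = align_zero_loss(low_loss)
    print(aligned.metadata['zero_loss']['shifted'])  # applied energy shifts in eV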
- from numba import jit
-
- def get_zero_losses(energy, z_loss_params):
-     z_loss_dset = np.zeros((z_loss_params.shape[0], z_loss_params.shape[1], energy.shape[0]))
-     for x in range(z_loss_params.shape[0]):
-         for y in range(z_loss_params.shape[1]):
-             z_loss_dset[x, y] += zl_func(energy, *z_loss_params[x, y])
-     return z_loss_dset
-
-
- def get_resolution_functions(dataset: sidpy.Dataset, startFitEnergy: float=-1, endFitEnergy: float=+1,
-                              n_workers: int=1, n_threads: int=8):
-     """
-     Analyze and fit low-loss EELS data within a specified energy range to determine zero-loss peaks.
-
-     This function processes a low-loss EELS dataset from transmission electron microscopy (TEM) data,
-     focusing on a specified energy range for analyzing and fitting the spectrum.
-     It determines fitting parameters and applies these to extract zero-loss peak information
-     from the dataset. The function handles both 2D and 3D datasets.
-
-     Parameters:
-     -----------
-     dataset (sidpy.Dataset): The dataset containing TEM spectral data.
-     startFitEnergy (float): The start energy of the fitting window.
-     endFitEnergy (float): The end energy of the fitting window.
-     n_workers (int, optional): The number of workers for parallel processing (default is 1).
-     n_threads (int, optional): The number of threads for parallel processing (default is 8).
-
-     Returns:
-     --------
-     tuple: A tuple containing:
-         - z_loss_dset (sidpy.Dataset): The dataset with added zero-loss peak information.
-         - z_loss_params (numpy.ndarray): Array of parameters used for the zero-loss peak fitting.
-
-     Raises:
-     -------
-     ValueError: If the input dataset does not have the expected dimensions or format.
-
-     Notes:
-     ------
-     - The function expects `dataset` to have specific dimensionalities and will raise an error if they are not met.
-     - Parallel processing is employed to enhance performance, particularly for large datasets.
-     """
-     energy = dataset.get_spectral_dims(return_axis=True)[0].values
-     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
-     end_fit_pixel = np.searchsorted(energy, endFitEnergy)
-     guess_width = (endFitEnergy - startFitEnergy)/2
-     if end_fit_pixel - start_fit_pixel < 5:
-         start_fit_pixel -= 2
-         end_fit_pixel += 2
-
-     def get_good_guess(zl_func, energy, spectrum):
-         popt, pcov = curve_fit(zl_func, energy, spectrum,
-                                p0=[0, guess_amplitude, guess_width,
-                                    0, guess_amplitude, guess_width])
-         return popt
-
-     fit_energy = energy[start_fit_pixel:end_fit_pixel]
-     # get a good guess for the fit parameters
-     if len(dataset.shape) == 3:
-         fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
-         guess_amplitude = np.sqrt(fit_dset.max())
-         guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=(0, 1))/fit_dset.shape[0]/fit_dset.shape[1])
-     elif len(dataset.shape) == 2:
-         fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
-         fit_energy = energy[start_fit_pixel:end_fit_pixel]
-         guess_amplitude = np.sqrt(fit_dset.max())
-         guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=0)/fit_dset.shape[0])
-     elif len(dataset.shape) == 1:
-         fit_dset = dataset[start_fit_pixel:end_fit_pixel]
-         fit_energy = energy[start_fit_pixel:end_fit_pixel]
-         guess_amplitude = np.sqrt(fit_dset.max())
-         guess_params = get_good_guess(zl_func, fit_energy, fit_dset)
-         z_loss_dset = dataset.copy()
-         z_loss_dset *= 0.0
-         z_loss_dset += zl_func(energy, *guess_params)
-         if 'zero_loss' not in z_loss_dset.metadata:
-             z_loss_dset.metadata['zero_loss'] = {}
-         z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
-                                                   'endFitEnergy': endFitEnergy,
-                                                   'fit_parameter': guess_params,
-                                                   'original_low_loss': dataset.title})
-         return z_loss_dset
-     else:
-         print('Error: need a spectrum or spectral image sidpy dataset')
-         print('Not dset.shape = ', dataset.shape)
-         return None
-
-     # define guess function for SidFitter
-     def guess_function(xvec, yvec):
-         return guess_params
-
-     # apply to all spectra
-     zero_loss_fitter = SidFitter(fit_dset, zl_func, num_workers=n_workers, guess_fn=guess_function, threads=n_threads,
-                                  return_cov=False, return_fit=False, return_std=False, km_guess=False, num_fit_parms=6)
-
-     [z_loss_params] = zero_loss_fitter.do_fit()
-     z_loss_dset = dataset.copy()
-     z_loss_dset *= 0.0
-
-     # energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
-     #                                                            z_loss_dset.shape[1], energy.shape[0]))
-     # z_loss_peaks = zl_func(energy_grid, *z_loss_params)
-     z_loss_params = np.array(z_loss_params)
-     z_loss_dset += get_zero_losses(np.array(energy), np.array(z_loss_params))
-
-     shifts = z_loss_params[:, :, 0] * z_loss_params[:, :, 3]
-     widths = z_loss_params[:, :, 2] * z_loss_params[:, :, 5]
-
-     z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
-                                               'endFitEnergy': endFitEnergy,
-                                               'fit_parameter': z_loss_params,
-                                               'original_low_loss': dataset.title})
-
-     return z_loss_dset
-
-
- def drude(energy_scale, peak_position, peak_width, gamma):
-     """dielectric function according to Drude theory"""
-
-     eps = (1 - (peak_position ** 2 - peak_width * energy_scale * 1j) /
-            (energy_scale ** 2 + 2 * energy_scale * gamma * 1j))  # Mod drude term
-     return eps
-
-
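The Drude dielectric function above enters EELS through the energy-loss function Im(-1/eps), which peaks near the plasmon energy. A quick numerical check (16.8 eV plasmon; widths are illustrative):

    import numpy as np

    e = np.linspace(1., 40., 400)                  # energy loss in eV
    eps = drude(e, peak_position=16.8, peak_width=1.0, gamma=0.1)
    elf = np.imag(-1. / eps)                       # energy-loss function
    print(e[np.argmax(elf)])                       # close to 16.8 eV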
- def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
-     """dielectric function according to Drude-Lorentz theory"""
-
-     eps = eps_inf
-     for i in range(leng):
-         eps = eps + amplitude[i] * (1 / (e + ep[i] + gamma[i] * 1j) - 1 / (e - ep[i] + gamma[i] * 1j))
-     return eps
-
-
- def get_plasmon_losses(energy, params):
-     dset = np.zeros((params.shape[0], params.shape[1], energy.shape[0]))
-     for x in range(params.shape[0]):
-         for y in range(params.shape[1]):
-             dset[x, y] += energy_loss_function(energy, params[x, y])
-     return dset
-
-
- def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float,
-                 number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
-     """
-     Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
-
-     This function applies the Drude model to fit plasmon peaks in a dataset obtained
-     from transmission electron microscopy (TEM). It processes the dataset to determine
-     peak positions, widths, and amplitudes within a specified energy range. The function
-     can handle datasets with different dimensions and offers parallel processing capabilities.
-
-     Parameters:
-     dataset: sidpy.Dataset or numpy.ndarray
-         The dataset containing TEM spectral data.
-     startFitEnergy: float
-         The start energy of the fitting window.
-     endFitEnergy: float
-         The end energy of the fitting window.
-     number_workers: int, optional
-         The number of workers for parallel processing (default is 4).
-     number_threads: int, optional
-         The number of threads for parallel processing (default is 8).
-
-     Returns:
-     fitted_dataset: sidpy.Dataset or numpy.ndarray
-         The dataset with fitted plasmon peak parameters. The dimensions and
-         format depend on the input dataset.
-
-     Raises:
-     ValueError: If the input dataset does not have the expected dimensions or format.
-
-     Notes:
-     - The function uses the Drude model to fit plasmon peaks.
-     - The fitting parameters are peak position (Ep), peak width (Ew), and amplitude (A).
-     """
-     # define Drude function for plasmon fitting
-
-     anglog, T, _ = angle_correction(dataset)
-
-     def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
-         eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
-         elf = (-1/eps).imag
-         return A*elf
-
-     # define window for fitting
-     energy = dataset.get_spectral_dims(return_axis=True)[0].values
-     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
-     end_fit_pixel = np.searchsorted(energy, endFitEnergy)
-
-     # rechunk dataset
-     if dataset.ndim == 3:
-         dataset = dataset.rechunk(chunks=(1, 1, -1))
-         fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
-     elif dataset.ndim == 2:
-         dataset = dataset.rechunk(chunks=(1, -1))
-         fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
-     else:
-         fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel] / anglog[start_fit_pixel:end_fit_pixel])
-         guess_pos = np.argmax(fit_dset)
-         guess_amplitude = fit_dset[guess_pos]
-         guess_width = (endFitEnergy-startFitEnergy)/4
-         guess_pos = energy[start_fit_pixel+guess_pos]
-
-         if guess_width > 8:
-             guess_width = 8
-         try:
-             popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
-                                    p0=[guess_pos, guess_width, guess_amplitude])
-         except:
-             end_fit_pixel = np.searchsorted(energy, 30)
-             fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel] / anglog[start_fit_pixel:end_fit_pixel])
-             try:
-                 popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
-                                        p0=[guess_pos, guess_width, guess_amplitude])
-             except:
-                 popt = [0, 0, 0]
-
-         plasmon = dataset.like_data(energy_loss_function(energy, popt[0], popt[1], popt[2]))
-         plasmon *= anglog
-         start_plasmon = np.searchsorted(energy, 0)+1
-         plasmon[:start_plasmon] = 0.
-
-         epsilon = drude(energy, popt[0], popt[1], 1) * popt[2]
-         epsilon[:start_plasmon] = 0.
-
-         plasmon.metadata['plasmon'] = {'parameter': popt, 'epsilon': epsilon}
-         return plasmon
-
-     # if it can be parallelized:
-     fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
-                        threads=number_threads, return_cov=False, return_fit=False, return_std=False,
-                        km_guess=False, num_fit_parms=3)
-     [fit_parameter] = fitter.do_fit()
-
-     plasmon_dset = dataset * 0.0
-     fit_parameter = np.array(fit_parameter)
-     plasmon_dset += get_plasmon_losses(np.array(energy), fit_parameter)
-     if 'plasmon' not in plasmon_dset.metadata:
-         plasmon_dset.metadata['plasmon'] = {}
-     plasmon_dset.metadata['plasmon'].update({'startFitEnergy': startFitEnergy,
-                                              'endFitEnergy': endFitEnergy,
-                                              'fit_parameter': fit_parameter,
-                                              'original_low_loss': dataset.title})
-
-     return plasmon_dset
-
-
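A hedged usage sketch for fit_plasmon (low_loss is a hypothetical single low-loss spectrum as a sidpy.Dataset, with the 'experiment' metadata filled in, e.g. by set_default_metadata; the window should bracket the plasmon):

    set_default_metadata(low_loss)
    plasmon = fit_plasmon(low_loss, startFitEnergy=10., endFitEnergy=25.)
    Ep, Ew, A = plasmon.metadata['plasmon']['parameter']  # position, width, amplitude
    print(Ep)  # fitted plasmon energy in eV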
- def angle_correction(spectrum):
-
-     acceleration_voltage = spectrum.metadata['experiment']['acceleration_voltage']
-     energy_scale = spectrum.get_spectral_dims(return_axis=True)[0]
-     # eff_beta = effective_collection_angle(energy_scale, spectrum.metadata['experiment']['convergence_angle'],
-     #                                       spectrum.metadata['experiment']['collection_angle'], acceleration_voltage)
-
-     epc = energy_scale.slope  # input('ev per channel : ');
-
-     alpha = spectrum.metadata['experiment']['convergence_angle']  # input('Alpha (mrad) : ');
-     beta = spectrum.metadata['experiment']['collection_angle']  # input('Beta (mrad) : ');
-     e = energy_scale.values
-     e0 = acceleration_voltage/1000  # input('E0 (keV) : ');
-
-     T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
-
-     tgt = e0*(1.+e0/1022.)/(1+e0/511.)
-     thetae = (e+1e-6)/tgt  # % avoid NaN for e=0
-     # % A2,B2,T2 ARE SQUARES OF ANGLES IN RADIANS**2
-     a2 = alpha*alpha*1e-6 + 1e-7  # % avoid inf for alpha=0
-     b2 = beta*beta*1e-6
-     t2 = thetae*thetae*1e-6
-     eta1 = np.sqrt((a2+b2+t2)**2-4*a2*b2)-a2-b2-t2
-     eta2 = 2.*b2*np.log(0.5/t2*(np.sqrt((a2+t2-b2)**2+4.*b2*t2)+a2+t2-b2))
-     eta3 = 2.*a2*np.log(0.5/t2*(np.sqrt((b2+t2-a2)**2+4.*a2*t2)+b2+t2-a2))
-     eta = (eta1+eta2+eta3)/a2/np.log(4./t2)
-     f1 = (eta1+eta2+eta3)/2./a2/np.log(1.+b2/t2)
-     f2 = f1
-     if alpha/beta > 1:
-         f2 = f1*a2/b2
-
-     bstar = thetae*np.sqrt(np.exp(f2*np.log(1.+b2/t2))-1.)
-     anglog = f2
-     """
-     b = eff_beta/1000.0  # %rad
-     e0 = acceleration_voltage/1000.0  # %keV
-     T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
-     tgt = 1000*e0*(1022.12 + e0)/(511.06 + e0)  # %eV Appendix E p 427
-
-     the = energy_scale/tgt  # varies with energy loss! # Appendix E p 427
-     anglog = np.log(1.0 + b*b/the/the)
-     # 2 * T = m_0 v**2 !!! a_0 = 0.05292 nm epc is for sum over I0
-     """
-     return anglog, (np.pi*0.05292*T / 2.0)/epc, bstar[0]
-
- def energy_loss_function(energy_scale: np.ndarray, p: np.ndarray, anglog=1) -> np.ndarray:
-     eps = 1 - p[0]**2/(energy_scale**2+p[1]**2) + 1j * p[1] * p[0]**2/energy_scale/(energy_scale**2+p[1]**2)
-     elf = (-1/eps).imag
-     return elf*p[2]*anglog
-
- def inelatic_mean_free_path(E_p, spectrum):
-     acceleration_voltage = spectrum.metadata['experiment']['acceleration_voltage']
-     energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
-
-     e0 = acceleration_voltage/1000.0  # %keV
-
-     eff_beta = effective_collection_angle(energy_scale, spectrum.metadata['experiment']['convergence_angle'],
-                                           spectrum.metadata['experiment']['collection_angle'], acceleration_voltage)
-     beta = eff_beta/1000.0  # %rad
-
-     T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
-     tgt = 1000*e0*(1022.12 + e0)/(511.06 + e0)  # %eV Appendix E p 427
-     theta_e = E_p/tgt  # varies with energy loss! # Appendix E p 427
-
-     # 2 * T = m_0 v**2 !!!
-     a_0 = 0.05292  # nm
-     imfp = 4*T*a_0/E_p/np.log(1+beta**2/theta_e**2)
-
-     return imfp, theta_e
-
-
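Behind inelatic_mean_free_path is Egerton's free-electron estimate lambda = 4 T a_0 / (E_p ln(1 + beta^2/theta_e^2)). A worked sketch with standalone numbers (200 kV, E_p = 16.8 eV, effective collection angle 30 mrad; illustrative only):

    import numpy as np

    e0 = 200.0                                                         # keV
    T = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2   # eV
    tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0)                   # eV
    E_p, beta = 16.8, 30e-3                                            # eV, rad
    theta_e = E_p / tgt                                                # characteristic angle in rad
    imfp = 4 * T * 0.05292 / E_p / np.log(1 + beta ** 2 / theta_e ** 2)
    print(imfp)  # on the order of 120 nm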
- def multiple_scattering(energy_scale: np.ndarray, p: list, core_loss=False) -> np.ndarray:
-     p = np.abs(p)
-     tmfp = p[3]
-     if core_loss:
-         dif = 1
-     else:
-         dif = 16
-     LLene = np.linspace(1, 2048-1, 2048)/dif
-
-     SSD = energy_loss_function(LLene, p)
-     ssd = np.fft.fft(SSD)
-     ssd2 = ssd.copy()
-
-     # ## sum contribution from each order of scattering:
-     PSD = np.zeros(len(LLene))
-     for order in range(15):
-         # this order's convoluted spectrum
-         # convoluted SSD is SSD2
-         SSD2 = np.fft.ifft(ssd).real
-
-         # scale right (could be done better? GERD)
-         # And add this order to final spectrum
-         PSD += SSD2*abs(sum(SSD)/sum(SSD2)) / scipy.special.factorial(order+1)*np.power(tmfp, (order+1))*np.exp(-tmfp)  # using equation 4.1 of Egerton ed2
-
-         # next order convolution
-         ssd = ssd * ssd2
-
-     PSD /= tmfp*np.exp(-tmfp)
-     BGDcoef = scipy.interpolate.splrep(LLene, PSD, s=0)
-     msd = scipy.interpolate.splev(energy_scale, BGDcoef)
-     start_plasmon = np.searchsorted(energy_scale, 0)+1
-     msd[:start_plasmon] = 0.
-     return msd
-
- def fit_multiple_scattering(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float,
-                             pin=None, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
-     """
-     Fit multiple scattering of the plasmon peak in a TEM dataset.
-
-     Parameters:
-     dataset: sidpy.Dataset or numpy.ndarray
-         The dataset containing TEM spectral data.
-     startFitEnergy: float
-         The start energy of the fitting window.
-     endFitEnergy: float
-         The end energy of the fitting window.
-     number_workers: int, optional
-         The number of workers for parallel processing (default is 4).
-     number_threads: int, optional
-         The number of threads for parallel processing (default is 8).
-
-     Returns:
-     fitted_dataset: sidpy.Dataset or numpy.ndarray
-         The dataset with fitted plasmon peak parameters. The dimensions and
-         format depend on the input dataset.
-
-     Raises:
-     ValueError: If the input dataset does not have the expected dimensions or format.
-
-     Notes:
-     - The function uses the Drude model to fit plasmon peaks.
-     - The fitting parameters are peak position (Ep), peak width (Ew), and amplitude (A).
-     """
-
-     # define window for fitting
-     energy = dataset.get_spectral_dims(return_axis=True)[0].values
-     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
-     end_fit_pixel = np.searchsorted(energy, endFitEnergy)
-
-     def errf_multi(p, y, x):
-         elf = multiple_scattering(x, p)
-         err = y - elf
-         # print(p, sum(np.abs(err)))
-         return np.abs(err)  # /np.sqrt(y)
-
-     if pin is None:
-         pin = np.array([9, 1, .7, 0.3])
-
-     fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
-     popt, lsq = leastsq(errf_multi, pin, args=(fit_dset, energy[start_fit_pixel:end_fit_pixel]), maxfev=2000)
-
-     multi = dataset.like_data(multiple_scattering(energy, popt))
-
-     multi.metadata['multiple_scattering'] = {'parameter': popt}
-     return multi
-
-
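The order-by-order weights in multiple_scattering follow Poisson statistics (Egerton Eq. 4.1): the n-fold scattered contribution is weighted by (t/lambda)^n / n! times exp(-t/lambda). A standalone check of those weights (t/lambda = 0.3, the default starting guess above):

    import numpy as np
    from scipy.special import factorial

    tmfp = 0.3                       # thickness in units of the inelastic mean free path
    orders = np.arange(1, 16)
    weights = tmfp ** orders / factorial(orders) * np.exp(-tmfp)
    print(weights[:3] / weights[0])  # [1, 0.15, 0.015]: higher orders fall off quickly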
- def drude_simulation(dset, e, ep, ew, tnm, eb):
-     """probabilities of dielectric function eps relative to zero-loss integral (i0 = 1)
-
-     Gives probabilities of dielectric function eps relative to zero-loss integral (i0 = 1) per eV
-     Details in R.F. Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
-
-     # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy (eb),
-     # this program generates:
-     # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
-     # single scattering from Eq. (4.26) and SRFINT from Eq. (4.31)
-     # The output is e, ssd into the file drude.ssd (for use in Flog etc.)
-     # and e, eps1, eps2 into drude.eps (for use in Kroeger etc.)
-     # Gives probabilities relative to zero-loss integral (i0 = 1) per eV
-     # Details in R.F. Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
-     # Version 10.11.26
-
-     """
-     energy_scale = dset.get_spectral_dims(return_axis=True)[0].values
-
-     epc = energy_scale[1] - energy_scale[0]  # input('ev per channel : ');
-
-     b = dset.metadata['collection_angle'] / 1000.  # rad
-     epc = dset.energy_scale[1] - dset.energy_scale[0]  # input('ev per channel : ');
-     e0 = dset.metadata['acceleration_voltage'] / 1000.  # input('incident energy e0(kev) : ');
-
-     # effective kinetic energy: T = m_o v^2/2,
-     t = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2  # eV # equ.5.2a or Appendix E p 427
-
-     # 2 gamma T
-     tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0)  # eV Appendix E p 427
-
-     rk0 = 2590 * (1.0 + e0 / 511.06) * np.sqrt(2.0 * t / 511060)
-
-     os = e[0]
-     ew_mod = eb
-     tags = dset.metadata
-
-     eps = 1 - (ep ** 2 - ew_mod * e * 1j) / (e ** 2 + 2 * e * ew * 1j)  # Mod drude term
-
-     eps[np.nonzero(eps == 0.0)] = 1e-19
-     elf = np.imag(-1 / eps)
-
-     the = e / tgt  # varies with energy loss! # Appendix E p 427
-     # srfelf = 4..*eps2./((1+eps1).^2+eps2.^2) - elf; %equivalent
-     srfelf = np.imag(-4. / (1.0 + eps)) - elf  # for 2 surfaces
-     angdep = np.arctan(b / the) / the - b / (b * b + the * the)
-     srfint = angdep * srfelf / (3.1416 * 0.05292 * rk0 * t)  # probability per eV
-     anglog = np.log(1.0 + b * b / the / the)
-     i0 = dset.sum()  # *tags['counts2e']
-
-     # 2 * t = m_0 v**2 !!! a_0 = 0.05292 nm
-     volint = abs(tnm / (np.pi * 0.05292 * t * 2.0) * elf * anglog)  # S equ 4.26% probability per eV
-     volint = volint * i0 / epc  # S probability per channel
-     ssd = volint  # + srfint;
-
-     if e[0] < -1.0:
-         xs = int(abs(-e[0] / epc))
-
-         ssd[0:xs] = 0.0
-         volint[0:xs] = 0.0
-         srfint[0:xs] = 0.0
-
-     # if os < 0:
-     p_s = np.trapz(e, srfint)  # 2 surfaces but includes negative Begrenzung contribution.
-     p_v = abs(np.trapz(e, abs(volint / tags['spec'].sum())))  # integrated volume probability
-     p_v = (volint / i0).sum()  # our data have the same epc and the trapez formula does not include
-     lam = tnm / p_v  # does NOT depend on free-electron approximation (no damping).
-     lamfe = 4.0 * 0.05292 * t / ep / np.log(1 + (b * tgt / ep) ** 2)  # Eq.(3.44) approximation
-
-     tags['eps'] = eps
-     tags['lam'] = lam
-     tags['lamfe'] = lamfe
-     tags['p_v'] = p_v
-
-     return ssd  # /np.pi
-
-
- def kroeger_core(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True):
-     """This function calculates the differential scattering probability
-
-     .. math::
-         \\frac{d^2P}{d \\Omega d_e}
-
-     of the low-loss region for total loss and volume plasmon loss
-
-     Args:
-         e_data (array): energy scale [eV]
-         a_data (array): angle or momentum range [rad]
-         eps_data (array): dielectric function
-         acceleration_voltage_kev (float): acceleration voltage [keV]
-         thickness (float): thickness in nm
-         relativistic (boolean): relativistic correction
-
-     Returns:
-         P (numpy array 2d): total loss probability
-         p_vol (numpy array 2d): volume loss probability
-
-         return P, P*scale*1e2, p_vol*1e2, p_simple*1e2
-     """
-
-     # $d^2P/(dE d\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im \left[ \frac{t \mu^2}{\varepsilon \phi^2} \right]$
-     """
-     # Internally everything is calculated in si units
-     # acceleration_voltage_kev = 200 #keV
-     # thick = 32.0*10-9 # m
-     """
-     a_data = np.array(a_data)
-     e_data = np.array(e_data)
-     # adjust input to si units
-     wavelength = get_wave_length(acceleration_voltage_kev * 1e3)  # in m
-     thickness = thickness * 1e-9  # input thickness now in m
-
-     # Define constants
-     # ec = 14.4;
-     m_0 = constants.value(u'electron mass')  # REST electron mass in kg
-     # h = constants.Planck  # Planck's constant
-     hbar = constants.hbar
-
-     c = constants.speed_of_light  # speed of light m/s
-     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
-     e = constants.value(u'elementary charge')  # electron charge in Coulomb
-     # print('hbar =', hbar, ' [Js] =', hbar/e, '[ eV s]')
-
-     # Calculate fixed terms of equation
-     va = 1 - (511. / (511. + acceleration_voltage_kev)) ** 2  # acceleration_voltage_kev is incident energy in keV
-     v = c * np.sqrt(va)
-
-     if relativistic:
-         beta = v / c  # non-relativistic for =1
-         gamma = 1. / np.sqrt(1 - beta ** 2)
-     else:
-         beta = 1
-         gamma = 1  # set = 1 to correspond to E+B & Siegle
-
-     momentum = m_0 * v * gamma  # used for xya, E&B have no gamma
-
-     # ##### Define mapped variables
-
-     # Define independent variables E, theta
-     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
-     # Define CONJUGATE dielectric function variable eps
-     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
-
-     # ##### Calculate lambda in equation EB 2.3
-     theta2 = theta ** 2 + 1e-15
-
-     theta_e = energy * e / momentum / v  # critical angle
-
-     lambda2 = theta2 - eps * theta_e ** 2 * beta ** 2  # Eq 2.3
-
-     lambd = np.sqrt(lambda2)
-     if (np.real(lambd) < 0).any():
-         print(' error negative lambda')
-
-     # ##### Calculate lambda0 in equation EB 2.4
-     # According to Kröger real(lambda0) is defined as positive!
-
-     phi2 = lambda2 + theta_e ** 2  # Eq. 2.2
-     lambda02 = theta2 - theta_e ** 2 * beta ** 2  # eta=1 Eq 2.4
-     lambda02[lambda02 < 0] = 0
-     lambda0 = np.sqrt(lambda02)
-     if not (np.real(lambda0) >= 0).any():
-         print(' error negative lambda0')
-
-     de = thickness * energy * e / (2.0 * hbar * v)  # Eq 2.5
-     xya = lambd * de / theta_e  # used in Eqs 2.6, 2.7, 4.4
-
-     lplus = lambda0 * eps + lambd * np.tanh(xya)  # eta=1 %Eq 2.6
-     lminus = lambda0 * eps + lambd / np.tanh(xya)  # eta=1 %Eq 2.7
-
-     mue2 = 1 - (eps * beta ** 2)  # Eq. 4.5
-     phi20 = lambda02 + theta_e ** 2  # Eq 4.6
-     phi201 = theta2 + theta_e ** 2 * (1 - (eps + 1) * beta ** 2)  # eta=1, eps-1 in E+B Eq.(4.7)
-
-     # Eq 4.2
-     a1 = phi201 ** 2 / eps
-     a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
-     a = a1 * a2
-
-     # Eq 4.3
-     b1 = beta ** 2 * lambda0 * theta_e * phi201
-     b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
-     b = b1 * b2
-
-     # Eq 4.4
-     c1 = -beta ** 4 * lambda0 * lambd * theta_e ** 2
-     c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
-     c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
-     c = c1 * (c2 + c3)
-
-     # Put all the pieces together...
-     p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
-
-     p_v = thickness * mue2 / eps / phi2
-
-     p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2  # ASSUMES eta=1
-     p_s2 = hbar / momentum
-     p_s3 = a + b + c
-
-     p_s = p_s1 * p_s2 * p_s3
-
-     # print(p_v.min(), p_v.max(), p_s.min(), p_s.max())
-     # Calculate P and p_vol (volume only)
-     dtheta = a_data[1] - a_data[0]
-     scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
-
-     p = p_coef * np.imag(p_v - p_s)  # Eq 4.1
-     p_vol = p_coef * np.imag(p_v) * scale
-
-     # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
-     # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
-
-     p_simple = p_coef * np.imag(1 / eps) * thickness / (theta2 + theta_e ** 2) * scale
-     # Watch it: eps is the conjugated dielectric function
-
-     return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2  # , lplus_min, lminus_min
-
-
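A hedged sketch of driving kroeger_core with a model dielectric function (a Drude eps from the drude function above; angles, thickness and energy range are illustrative):

    import numpy as np

    e_data = np.linspace(0.1, 40., 400)        # eV
    a_data = np.linspace(0., 10e-3, 200)       # rad
    eps_data = drude(e_data, 16.8, 1.0, 0.1)   # model dielectric function
    p, p_scaled, p_vol, p_simple = kroeger_core(e_data, a_data, eps_data,
                                                acceleration_voltage_kev=200., thickness=50.)
    print(p.shape)                             # (len(a_data), len(e_data))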
- #################################################################
- # CORE - LOSS functions
- #################################################################
-
- def get_z(z: Union[int, str]) -> int:
-     """Returns the atomic number independent of input as a string or number
-
-     Parameter
-     ---------
-     z: int, str
-         atomic number or chemical symbol (0 if not valid)
-
-     Return
-     ------
-     z_out: int
-         atomic number
-     """
-     x_sections = get_x_sections()
-
-     z_out = 0
-     if str(z).isdigit():
-         z_out = int(z)
-     elif isinstance(z, str):
-         for key in x_sections:
-             if x_sections[key]['name'].lower() == z.lower():  # well, one really should know how to write elemental symbols
-                 z_out = int(key)
-     else:
-         raise TypeError('A string or number is required')
-     return z_out
-
-
- def get_x_sections(z: int=0) -> dict:
-     """Reads X-ray fluorescent cross-sections from a dictionary.
-
-     Parameters
-     ----------
-     z: int
-         atomic number; if zero, all cross-sections will be returned
-
-     Returns
-     -------
-     dictionary
-         cross-section of an element, or of all elements if z = 0
-
-     """
-     if z < 1:
-         return x_sections
-     else:
-         z = str(z)
-         if z in x_sections:
-             return x_sections[z]
-         else:
-             return 0
-
-
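Usage of the two lookups above; element symbols resolve case-insensitively (outputs depend on the x_sections table shipped with pyTEMlib):

    print(get_z('Si'), get_z(14))  # both resolve to 14
    si = get_x_sections(14)        # cross-section dictionary for silicon
    print(si['name'])              # expected: 'Si'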
- def list_all_edges(z: Union[str, int]=0, verbose=False) -> list[str, dict]:
-     """List all ionization edges of an element with atomic number z
-
-     Parameters
-     ----------
-     z: int
-         atomic number
-     verbose: bool, optional
-         more info if set to True
-
-     Returns
-     -------
-     out_string: str
-         string with all major edges in energy range
-     """
-
-     element = str(get_z(z))
-     x_sections = get_x_sections()
-     out_string = ''
-     if verbose:
-         print('Major edges')
-     edge_list = {x_sections[element]['name']: {}}
-
-     for key in all_edges:
-         if key in x_sections[element]:
-             if 'onset' in x_sections[element][key]:
-                 if verbose:
-                     print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
-                 out_string = out_string + f" {x_sections[element]['name']}-{key}: " \
-                                           f"{x_sections[element][key]['onset']:8.1f} eV \n"
-                 edge_list[x_sections[element]['name']][key] = x_sections[element][key]['onset']
-     return out_string, edge_list
-
-
- def find_all_edges(edge_onset: float, maximal_chemical_shift: float=5.0, major_edges_only: bool=False) -> str:
-     """Find all (major and minor) edges within an energy range
-
-     Parameters
-     ----------
-     edge_onset: float
-         approximate energy of ionization edge
-     maximal_chemical_shift: float, default = 5 eV
-         range of energy window around edge_onset to look for major edges
-     major_edges_only: boolean, default = False
-         only major edges are considered if True
-
-     Returns
-     -------
-     text: str
-         string with all edges in energy range
-
-     """
-
-     text = ''
-     x_sections = get_x_sections()
-     for element in x_sections:
-         for key in x_sections[element]:
-             if isinstance(x_sections[element][key], dict):
-                 if 'onset' in x_sections[element][key]:
-                     if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
-                         # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
-                         new_text = f"\n {x_sections[element]['name']:2s}-{key}: " \
-                                    f"{x_sections[element][key]['onset']:8.1f} eV "
-                         if major_edges_only:
-                             if key in major_edges:
-                                 text += new_text
-                         else:
-                             text += new_text
-
-     return text
-
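For example, querying the table near the oxygen K onset (the exact listing depends on the cross-section table):

    print(find_all_edges(532., maximal_chemical_shift=5., major_edges_only=True))
    # expected to include the O-K1 edge near 532 eV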
- def find_associated_edges(dataset: sidpy.Dataset) -> None:
-     onsets = []
-     edges = []
-     if 'core_loss' in dataset.metadata:
-         if 'edges' in dataset.metadata['core_loss']:
-             for key, edge in dataset.metadata['core_loss']['edges'].items():
-                 if key.isdigit():
-                     """for sym in edge['all_edges']:  # TODO: Could be replaced with exclude
-                         onsets.append(edge['all_edges'][sym]['onset'] + edge['chemical_shift'])
-                         edges.append([key, f"{edge['element']}-{sym}", onsets[-1]])
-                     """
-                     onsets.append(edge['onset'])
-                     dataset.metadata['core_loss']['edges'][key]['associated_peaks'] = {}
-     if 'peak_fit' in dataset.metadata:
-         p = dataset.metadata['peak_fit']['peak_out_list']
-         for key, peak in enumerate(p):
-             distances = (onsets-peak[0])*-1
-             distances[distances < -0.3] = 1e6
-             if np.min(distances) < 50:
-                 index = np.argmin(distances)
-                 dataset.metadata['core_loss']['edges'][str(index)]['associated_peaks'][key] = peak
-
-
-     """for key, peak in dataset.metadata['peak_fit']['peaks'].items():
-         if key.isdigit():
-             distance = dataset.get_spectral_dims(return_axis=True)[0].values[-1]
-             index = -1
-             for ii, onset in enumerate(onsets):
-                 if onset < peak['position'] < onset+post_edge:
-                     if distance > np.abs(peak['position'] - onset):
-                         distance = np.abs(peak['position'] - onset)  # TODO: check whether absolute is good
-                         distance_onset = peak['position'] - onset
-                         index = ii
-             if index >= 0:
-                 peak['associated_edge'] = edges[index][1]  # check if more info is necessary
-                 peak['distance_to_onset'] = distance_onset
-     """
-
- def find_white_lines(dataset: sidpy.Dataset) -> dict:
-     white_lines_out = {'sum': {}, 'ratio': {}}
-     white_lines = []
-     if 'peak_fit' in dataset.metadata:
-         peaks = dataset.metadata['peak_fit']['peaks']
-     else:
-         return
-     for index, edge in dataset.metadata['core_loss']['edges'].items():
-         if index.isdigit():
-             if 'associated_peaks' in edge:
-                 peaks = edge['associated_peaks']
-
-                 if edge['symmetry'][-2:] in ['L3', 'M5']:
-                     if 'L3' in edge['all_edges']:
-                         end_range1 = edge['all_edges']['L2']['onset'] + edge['chemical_shift']
-                         end_range2 = edge['all_edges']['L2']['onset']*2 - edge['all_edges']['L3']['onset'] + edge['chemical_shift']
-                         white_lines = ['L3', 'L2']
-                     elif 'M5' in edge['all_edges']:
-                         end_range1 = edge['all_edges']['M4']['onset'] + edge['chemical_shift']
-                         end_range2 = edge['all_edges']['M4']['onset']*2 - edge['all_edges']['M5']['onset'] + edge['chemical_shift']
-                         white_lines = ['M5', 'M4']
-                     else:
-                         return
-                     white_line_areas = [0., 0.]
-                     for key, peak in peaks.items():
-                         if str(key).isdigit():
-                             if peak[0] < end_range1:
-                                 white_line_areas[0] += np.sqrt(2 * np.pi) * peak[1] * np.abs(peak[2]/np.sqrt(2 * np.log(2)))
-                             elif peak[0] < end_range2:
-                                 white_line_areas[1] += np.sqrt(2 * np.pi) * peak[1] * np.abs(peak[2]/np.sqrt(2 * np.log(2)))
-
-                     edge['white_lines'] = {white_lines[0]: white_line_areas[0], white_lines[1]: white_line_areas[1]}
-
-                     reference_counts = edge['areal_density']*dataset.metadata['core_loss']['xsections'][int(index)].sum()
-                     white_lines_out['sum'][f"{edge['element']}-{white_lines[0]}+{white_lines[1]}"] = (white_line_areas[0] + white_line_areas[1])/reference_counts
-                     white_lines_out['ratio'][f"{edge['element']}-{white_lines[0]}/{white_lines[1]}"] = white_line_areas[0] / white_line_areas[1]
-     return white_lines_out
-
-
-     """white_line_ratios = {}
-     white_line_sum = {}
-     for sym, area in white_lines.items():
-         if sym[-2:] in ['L2', 'M4', 'M2']:
-             if area > 0 and f"{sym[:-1]}{int(sym[-1]) + 1}" in white_lines:
-                 if white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"] > 0:
-                     white_line_ratios[f"{sym}/{sym[-2]}{int(sym[-1]) + 1}"] = area / white_lines[
-                         f"{sym[:-1]}{int(sym[-1]) + 1}"]
-                     white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] = (
-                         area + white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"])
-
-     areal_density = 1.
-     if 'edges' in dataset.metadata:
-         for key, edge in dataset.metadata['edges'].items():
-             if key.isdigit():
-                 if edge['element'] == sym.split('-')[0]:
-                     areal_density = edge['areal_density']
-                     break
-     white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] /= areal_density
-
-     dataset.metadata['peak_fit']['white_lines'] = white_lines
-     dataset.metadata['peak_fit']['white_line_ratios'] = white_line_ratios
-     dataset.metadata['peak_fit']['white_line_sums'] = white_line_sum
-     """
-
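For reference, the white-line bookkeeping above integrates each fitted Gaussian analytically: area = sqrt(2 pi) * amplitude * sigma with sigma = FWHM / (2 sqrt(2 ln 2)). A standalone check of that conversion (illustrative peak: amplitude 100, FWHM 2 eV):

    import numpy as np

    amplitude, fwhm = 100., 2.
    sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
    area = np.sqrt(2 * np.pi) * amplitude * sigma
    print(area)  # about 212.9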
1260
- def second_derivative(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:
1261
- """Calculates second derivative of a sidpy.dataset"""
1262
-
1263
- dim = dataset.get_spectral_dims()
1264
- energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1265
- if dataset.data_type.name == 'SPECTRAL_IMAGE':
1266
- spectrum = dataset.view.get_spectrum()
1267
- else:
1268
- spectrum = np.array(dataset)
1269
-
1270
- spec = scipy.ndimage.gaussian_filter(spectrum, 3)
1271
-
1272
- dispersion = energy_scale.slope
1273
- second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
1274
- second_dif[:3] = 0
1275
- second_dif[-3:] = 0
1276
-
1277
- # find if there is a strong edge at high energy_scale
1278
- noise_level = 2. * np.std(second_dif[3:50])
1279
- [indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
1280
- width = 50 / dispersion
1281
- if width < 50:
1282
- width = 50
1283
- start_end_noise = int(len(energy_scale) - width)
1284
- for index in indices[::-1]:
1285
- if index > start_end_noise:
1286
- start_end_noise = index - 70
1287
-
1288
- noise_level_start = sensitivity * np.std(second_dif[3:50])
1289
- noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
1290
- #slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
1291
- #noise_level = noise_level_start #+ np.arange(len(energy_scale)) * slope
1292
- return second_dif , noise_level
1293
-
1294
-
1295
-
- def find_edges(dataset: sidpy.Dataset, sensitivity: float = 2.5) -> list:
-     """Find edges within a sidpy.Dataset"""
-
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
-
-     second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
-
-     [indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
-
-     peaks['peak_positions'] = energy_scale[indices]
-     peaks['peak_indices'] = indices
-     edge_energies = [energy_scale[50]]
-     edge_indices = []
-
-     [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
-     minima = energy_scale[indices]
-
-     for peak_number in range(len(peaks['peak_positions'])):
-         position = peaks['peak_positions'][peak_number]
-         if position - edge_energies[-1] > 20:
-             impossible = minima[minima < position]
-             impossible = impossible[impossible > position - 5]
-             if len(impossible) == 0:
-                 possible = minima[minima > position]
-                 possible = possible[possible < position + 5]
-                 if len(possible) > 0:
-                     edge_energies.append((position + possible[0]) / 2)
-                     edge_indices.append(np.searchsorted(energy_scale, (position + possible[0]) / 2))
-
-     selected_edges = []
-     for peak in edge_indices:
-         if 525 < energy_scale[peak] < 533:
-             selected_edges.append('O-K1')
-         else:
-             selected_edge = ''
-             edges = find_all_edges(energy_scale[peak], 20, major_edges_only=True)
-             edges = edges.split('\n')
-             minimum_dist = 100.
-             for edge in edges[1:]:
-                 edge = edge[:-3].split(':')
-                 name = edge[0].strip()
-                 energy = float(edge[1].strip())
-                 if np.abs(energy - energy_scale[peak]) < minimum_dist:
-                     minimum_dist = np.abs(energy - energy_scale[peak])
-                     selected_edge = name
-
-             if selected_edge != '':
-                 selected_edges.append(selected_edge)
-
-     return selected_edges
-
-
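In practice find_edges only needs a sidpy.Dataset with a calibrated energy-loss axis. A hedged usage sketch (the file name is hypothetical, and open_file is assumed to come from pyTEMlib's file_tools):

```python
import pyTEMlib.file_tools as ft
import pyTEMlib.eels_tools as eels

dataset = ft.open_file('core_loss_spectrum.hf5')   # hypothetical file
edges = eels.find_edges(dataset, sensitivity=2.5)
print(edges)                                       # e.g. ['Ti-L3', 'O-K1']
```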
- def assign_likely_edges(edge_channels: Union[list, np.ndarray], energy_scale: np.ndarray):
-     """Assign the most likely ionization edges to the detected edge channels"""
-     edges_in_list = []
-     result = {}
-     for channel in edge_channels:
-         if channel not in edge_channels[edges_in_list]:
-             shift = 5
-             element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
-             while len(element_list) < 1:
-                 shift += 1
-                 element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
-
-             if len(element_list) > 1:
-                 while len(element_list) > 0:
-                     shift -= 1
-                     element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
-                 element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift + 1, major_edges_only=True)
-             element = element_list[:4].strip()
-             z = get_z(element)
-             result[element] = []
-             _, edge_list = list_all_edges(z)
-
-             for peak in edge_list:
-                 for edge in edge_list[peak]:
-                     possible_minor_edge = np.argmin(np.abs(energy_scale[edge_channels] - edge_list[peak][edge]))
-                     if np.abs(energy_scale[edge_channels[possible_minor_edge]] - edge_list[peak][edge]) < 3:
-                         edges_in_list.append(possible_minor_edge)
-
-                         result[element].append(edge)
-
-     return result
-
-
- def auto_id_edges(dataset):
-     """Automatically identify the ionization edges present in a spectrum"""
-     edge_channels = identify_edges(dataset)
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
-     found_edges = assign_likely_edges(edge_channels, energy_scale)
-     return found_edges
-
-
- def identify_edges(dataset: sidpy.Dataset, noise_level: float = 2.0):
-     """
-     Use the first derivative to determine edge onsets.
-     Any peak in the first derivative higher than noise_level times the standard deviation will be considered.
-
-     Parameters
-     ----------
-     dataset: sidpy.Dataset
-         the spectrum
-     noise_level: float
-         this number times the standard deviation of the first derivative decides whether an edge onset is significant
-
-     Returns
-     -------
-     edge_channels: numpy.ndarray
-     """
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
-     dispersion = energy_scale.slope
-
-     spec = scipy.ndimage.gaussian_filter(dataset, 3 / dispersion)  # smooth with a 3 eV wide Gaussian
-
-     first_derivative = spec - np.roll(spec, +2)
-     first_derivative[:3] = 0
-     first_derivative[-3:] = 0
-
-     # find out whether there is a strong edge at the high end of the energy scale
-     noise_level = noise_level * np.std(first_derivative[3:50])
-     [edge_channels, _] = scipy.signal.find_peaks(first_derivative, noise_level)
-
-     return edge_channels
-
-
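identify_edges works the same way with a first difference; note how the Gaussian width is given in channels (3 eV divided by the dispersion) so the smoothing is defined in energy units. A self-contained sketch with illustrative numbers:

```python
import numpy as np
import scipy.ndimage
import scipy.signal

dispersion = 0.5                                  # eV per channel (assumed)
energy = 200. + np.arange(2048) * dispersion
spectrum = 1e7 * energy**-3.0
spectrum[np.searchsorted(energy, 532.):] *= 1.3   # synthetic O-K-like onset at 532 eV

spec = scipy.ndimage.gaussian_filter(spectrum, 3 / dispersion)  # 3 eV wide Gaussian, in channels
first_derivative = spec - np.roll(spec, +2)
first_derivative[:3] = first_derivative[-3:] = 0

threshold = 2.0 * np.std(first_derivative[3:50])
edge_channels, _ = scipy.signal.find_peaks(first_derivative, height=threshold)
print(energy[edge_channels])                      # onset energies, close to 532 eV
```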
- def add_element_to_dataset(dataset: sidpy.Dataset, z: Union[int, str]):
-     """Add an element (given by atomic number or symbol) to the edges dictionary in the dataset metadata"""
-     # We check whether this element is already in the edges dictionary
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
-
-     zz = get_z(z)
-     if 'edges' not in dataset.metadata:
-         dataset.metadata['edges'] = {'model': {}, 'use_low_loss': False}
-     index = 0
-     for key, edge in dataset.metadata['edges'].items():
-         if key.isdigit():
-             index += 1
-             if 'z' in edge:
-                 if zz == edge['z']:
-                     index = int(key)
-                     break
-
-     major_edge = ''
-     minor_edge = ''
-     all_edges = {}
-     x_section = get_x_sections(zz)
-     edge_start = 10  # int(15./ft.get_slope(self.energy_scale)+0.5)
-     for key in x_section:
-         if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():
-             if energy_scale[edge_start] < x_section[key]['onset'] < energy_scale[-edge_start]:
-                 if key in ['K1', 'L3', 'M5', 'M3']:
-                     major_edge = key
-
-                 all_edges[key] = {'onset': x_section[key]['onset']}
-
-     if major_edge != '':
-         key = major_edge
-     elif minor_edge != '':
-         key = minor_edge
-     else:
-         print(f'Could not find an edge of {zz} in spectrum')
-         return False
-
-     if str(index) not in dataset.metadata['edges']:
-         dataset.metadata['edges'][str(index)] = {}
-
-     start_exclude = x_section[key]['onset'] - x_section[key]['excl before']
-     end_exclude = x_section[key]['onset'] + x_section[key]['excl after']
-
-     dataset.metadata['edges'][str(index)] = {'z': zz, 'symmetry': key, 'element': elements[zz],
-                                              'onset': x_section[key]['onset'], 'end_exclude': end_exclude,
-                                              'start_exclude': start_exclude}
-     dataset.metadata['edges'][str(index)]['all_edges'] = all_edges
-     dataset.metadata['edges'][str(index)]['chemical_shift'] = 0.0
-     dataset.metadata['edges'][str(index)]['areal_density'] = 0.0
-     dataset.metadata['edges'][str(index)]['original_onset'] = dataset.metadata['edges'][str(index)]['onset']
-     return True
-
-
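For reference, the per-edge entry written into dataset.metadata['edges'] has the following shape; the numbers below are rough textbook values for a Ti L-edge, purely illustrative:

```python
edge_entry = {
    'z': 22,                        # atomic number
    'element': 'Ti',
    'symmetry': 'L3',               # major edge chosen for quantification
    'onset': 455.5,                 # eV; original_onset + chemical_shift
    'original_onset': 455.5,
    'chemical_shift': 0.0,
    'start_exclude': 450.5,         # energy window excluded from the background fit
    'end_exclude': 505.5,
    'all_edges': {'L3': {'onset': 455.5}, 'L2': {'onset': 460.2}},
    'areal_density': 0.0,           # filled in later by the edge fit
}
```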
- def make_edges(edges_present: list, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray = None) -> dict:
-     """Makes the edges dictionary for quantification
-
-     Parameters
-     ----------
-     edges_present: list
-         list of edges
-     energy_scale: numpy array
-         energy scale on which to make the cross-sections
-     e_0: float
-         acceleration voltage (in V)
-     coll_angle: float
-         collection angle in mrad
-     low_loss: numpy array with same length as energy_scale
-         low-loss spectrum with which to convolve the cross-section (default=None)
-
-     Returns
-     -------
-     edges: dict
-         dictionary with all information on the cross-sections
-     """
-     x_sections = get_x_sections()
-     edges = {}
-     for i, edge in enumerate(edges_present):
-         element, symmetry = edge.split('-')
-         z = 0
-         for key in x_sections:
-             if element == x_sections[key]['name']:
-                 z = int(key)
-         edges[i] = {}
-         edges[i]['z'] = z
-         edges[i]['symmetry'] = symmetry
-         edges[i]['element'] = element
-
-     for key in edges:
-         xsec = x_sections[str(edges[key]['z'])]
-         if 'chemical_shift' not in edges[key]:
-             edges[key]['chemical_shift'] = 0
-         if 'symmetry' not in edges[key]:
-             edges[key]['symmetry'] = 'K1'
-         if 'K' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'K1'
-         elif 'L' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'L3'
-         elif 'M' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'M5'
-         else:
-             edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
-
-         edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
-         edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-         edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
-         edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
-
-     edges = make_cross_sections(edges, energy_scale, e_0, coll_angle, low_loss)
-
-     return edges
-
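A hedged usage sketch for make_edges (values are illustrative; e_0 is in volts, as the docstring states, and the call requires pyTEMlib's X-ray photo-absorption tables via get_x_sections):

```python
import numpy as np

energy_scale = np.arange(300., 700., 0.5)          # eV, 0.5 eV dispersion
edges = make_edges(['Ti-L3', 'O-K1'], energy_scale, e_0=200_000., coll_angle=30.)
print(edges[0]['element'], edges[0]['symmetry'], edges[0]['onset'])
print(edges[0]['data'].shape)                      # cross-section sampled on energy_scale
```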
- def fit_dataset(dataset: sidpy.Dataset):
-     """Fit the edges of a dataset and print the relative composition"""
-     energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
-     if 'fit_area' not in dataset.metadata['edges']:
-         dataset.metadata['edges']['fit_area'] = {}
-     if 'fit_start' not in dataset.metadata['edges']['fit_area']:
-         dataset.metadata['edges']['fit_area']['fit_start'] = energy_scale[50]
-     if 'fit_end' not in dataset.metadata['edges']['fit_area']:
-         dataset.metadata['edges']['fit_area']['fit_end'] = energy_scale[-2]
-     dataset.metadata['edges']['use_low_loss'] = False
-
-     if 'experiment' in dataset.metadata:
-         exp = dataset.metadata['experiment']
-         if 'convergence_angle' not in exp:
-             raise ValueError('need a convergence_angle in the experiment metadata dictionary')
-         alpha = exp['convergence_angle']
-         beta = exp['collection_angle']
-         beam_kv = exp['acceleration_voltage']
-         eff_beta = effective_collection_angle(energy_scale, alpha, beta, beam_kv)
-         edges = make_cross_sections(dataset.metadata['edges'], np.array(energy_scale), beam_kv, eff_beta)
-         dataset.metadata['edges'] = fit_edges2(dataset, energy_scale, edges)
-         areal_density = []
-         elements = []
-         for key in edges:
-             if key.isdigit():  # only edges have numbers as keys in that dictionary
-                 elements.append(edges[key]['element'])
-                 areal_density.append(edges[key]['areal_density'])
-         areal_density = np.array(areal_density)
-         out_string = '\nRelative composition: \n'
-         for i, element in enumerate(elements):
-             out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '
-         print(out_string)
-
-
- def auto_chemical_composition(dataset: sidpy.Dataset) -> None:
-     """Automatically identify edges, add them to the dataset, and fit the chemical composition"""
-     found_edges = auto_id_edges(dataset)
-     for key in found_edges:
-         add_element_to_dataset(dataset, key)
-     fit_dataset(dataset)
-
-
- def make_cross_sections(edges: dict, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray = None) -> dict:
-     """Update the edges dictionary with collection-angle-integrated X-ray photo-absorption cross-sections
-
-     """
-     for key in edges:
-         if str(key).isdigit():
-             if edges[key]['z'] < 1:
-                 break
-             edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
-                                            edges[key]['chemical_shift']) / 1e10  # from barns to 1/nm^2
-             if low_loss is not None:
-                 low_loss = np.roll(np.array(low_loss), 1024 - np.argmax(np.array(low_loss)))
-                 edges[key]['data'] = scipy.signal.convolve(edges[key]['data'], low_loss / low_loss.sum(), mode='same')
-
-             edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-             edges[key]['X_section_type'] = 'XRPA'
-             edges[key]['X_section_source'] = 'pyTEMlib'
-
-     return edges
-
-
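The low-loss convolution step above centers the zero-loss peak at channel 1024, normalizes the low-loss spectrum to unit area, and convolves. A self-contained numpy sketch with stand-in arrays, not real data:

```python
import numpy as np
import scipy.signal

low_loss = np.exp(-np.linspace(-8., 8., 2048)**2)   # stand-in zero-loss-dominated spectrum
xsec = np.zeros(2048)
xsec[1200:] = 1.                                    # stand-in, step-like cross-section

low_loss = np.roll(low_loss, 1024 - np.argmax(low_loss))        # zero-loss peak -> channel 1024
broadened = scipy.signal.convolve(xsec, low_loss / low_loss.sum(), mode='same')
print(broadened[1190:1210].round(2))                # the step is now smeared by the low loss
```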
- def power_law(energy: np.ndarray, a: float, r: float) -> np.ndarray:
-     """power law for power_law_background"""
-     return a * np.power(energy, -r)
-
-
- def power_law_background(spectrum: np.ndarray, energy_scale: np.ndarray, fit_area: list, verbose: bool = False):
-     """fit of a power law to the background of a spectrum"""
-
-     # Determine energy window for background fit in pixels
-     startx = np.searchsorted(energy_scale, fit_area[0])
-     endx = np.searchsorted(energy_scale, fit_area[1])
-
-     x = np.array(energy_scale)[startx:endx]
-     y = np.array(spectrum)[startx:endx].flatten()
-
-     # Initial values of parameters
-     p0 = np.array([1.0E+20, 3])
-
-     # background fitting
-     def bgdfit(pp, yy, xx):
-         err = yy - power_law(xx, pp[0], pp[1])
-         return err
-
-     [p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
-
-     background_difference = y - power_law(x, p[0], p[1])
-     background_noise_level = std_dev = np.std(background_difference)
-     if verbose:
-         print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
-         print(background_difference.max() / background_noise_level)
-         print(f'Noise level in spectrum {std_dev:.3f} counts')
-
-     # Calculate background over the whole energy scale
-     background = power_law(energy_scale, p[0], p[1])
-     return background, p
-
-
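The same least-squares power-law fit, reduced to a self-contained sketch on synthetic counts (all numbers illustrative):

```python
import numpy as np
from scipy.optimize import leastsq

energy = np.linspace(300., 500., 400)
counts = 1e9 * energy**-3.0 + np.random.default_rng(0).normal(0., 0.2, 400)

def residuals(p, xx, yy):                # yy - A * xx**(-r)
    return yy - p[0] * np.power(xx, -p[1])

p, _ = leastsq(residuals, np.array([1.0e8, 2.5]), args=(energy, counts), maxfev=2000)
background = p[0] * np.power(energy, -p[1])
print(f'A = {p[0]:.3g}, r = {p[1]:.2f}')   # close to A = 1e9, r = 3
```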
- def cl_model(xx, pp, number_of_edges, xsec):
-     """core-loss model for fitting"""
-     yy = pp[0] * xx**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
-     for i in range(number_of_edges):
-         pp[i + 5] = np.abs(pp[i + 5])
-         yy = yy + pp[i + 5] * xsec[i, :]
-     return yy
-
-
- def fit_edges2(spectrum, energy_scale, edges):
-     """fit edges for quantification"""
-
-     dispersion = energy_scale[1] - energy_scale[0]
-     # Determine fitting ranges and masks to exclude ranges
-     mask = np.ones(len(spectrum))
-
-     background_fit_start = edges['fit_area']['fit_start']
-     if edges['fit_area']['fit_end'] > energy_scale[-1]:
-         edges['fit_area']['fit_end'] = energy_scale[-1]
-     background_fit_end = edges['fit_area']['fit_end']
-
-     startx = np.searchsorted(energy_scale, background_fit_start)
-     endx = np.searchsorted(energy_scale, background_fit_end)
-     mask[0:startx] = 0.0
-     mask[endx:-1] = 0.0
-     for key in edges:
-         if key.isdigit():
-             if edges[key]['start_exclude'] > background_fit_start + dispersion:
-                 if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
-                     if edges[key]['end_exclude'] > background_fit_end - dispersion:
-                         # we need at least one channel to fit.
-                         edges[key]['end_exclude'] = background_fit_end - dispersion
-                     startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
-                     if startx < 2:
-                         startx = 1
-                     endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
-                     mask[startx: endx] = 0.0
-
-     ########################
-     # Background Fit
-     ########################
-     bgd_fit_area = [background_fit_start, background_fit_end]
-     background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
-
-     #######################
-     # Edge Fit
-     #######################
-     x = energy_scale
-     blurred = gaussian_filter(spectrum, sigma=5)
-
-     y = blurred  # now in probability
-     y[np.where(y < 1e-8)] = 1e-8
-
-     xsec = []
-     number_of_edges = 0
-     for key in edges:
-         if key.isdigit():
-             xsec.append(edges[key]['data'])
-             number_of_edges += 1
-     xsec = np.array(xsec)
-
-     def model(xx, pp):
-         yy = pp[0] * xx**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
-         for i in range(number_of_edges):
-             pp[i + 5] = np.abs(pp[i + 5])
-             yy = yy + pp[i + 5] * xsec[i, :]
-         return yy
-
-     def residuals(pp, xx, yy):
-         err = np.abs((yy - model(xx, pp)) * mask) / np.sqrt(np.abs(y))
-         return err
-
-     scale = y[100]
-     pin = np.array([A, -r, 10., 1., 0.00] + [scale / 5] * number_of_edges)
-     [p, _] = leastsq(residuals, pin, args=(x, y))
-
-     for key in edges:
-         if key.isdigit():
-             edges[key]['areal_density'] = p[int(key) + 5]
-     edges['model'] = {}
-     edges['model']['background'] = (p[0] * np.power(x, p[1]) + p[2] + p[3] * x + p[4] * x * x)
-     edges['model']['background-poly_0'] = p[2]
-     edges['model']['background-poly_1'] = p[3]
-     edges['model']['background-poly_2'] = p[4]
-     edges['model']['background-A'] = p[0]
-     edges['model']['background-r'] = p[1]
-     edges['model']['spectrum'] = model(x, p)
-     edges['model']['blurred'] = blurred
-     edges['model']['mask'] = mask
-     edges['model']['fit_parameter'] = p
-     edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
-     edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
-     edges['model']['xsec'] = xsec
-     return edges
-
-
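The fit above composes a power law, a second-order polynomial, and one scaled cross-section per edge, and weights the residuals by 1/sqrt(counts) as an approximation to Poisson statistics. A self-contained miniature with one stand-in edge:

```python
import numpy as np
from scipy.optimize import leastsq

x = np.linspace(300., 700., 800)
xsec = np.zeros((1, 800))
xsec[0, 400:] = 1.0                                  # stand-in cross-section of one edge
y = 1e9 * x**-3.0 + 40. * xsec[0]                    # background + edge signal

def model(xx, pp):                                   # power law + polynomial + edges
    yy = pp[0] * xx**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
    return yy + np.abs(pp[5]) * xsec[0]

def residuals(pp, xx, yy):
    return (yy - model(xx, pp)) / np.sqrt(np.abs(yy))   # Poisson-like weighting

p, _ = leastsq(residuals, np.array([1e9, -3., 0., 0., 0., 10.]), args=(x, y))
print(f'edge amplitude (areal-density parameter): {np.abs(p[5]):.1f}')   # ~40
```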
- def core_loss_model(energy_scale, pp, number_of_edges, xsec):
-     """core-loss model for fitting"""
-     xx = np.array(energy_scale)
-     yy = pp[0] * xx**pp[1] + pp[2] + pp[3] * xx + pp[4] * xx * xx
-     for i in range(number_of_edges):
-         pp[i + 5] = np.abs(pp[i + 5])
-         yy = yy + pp[i + 5] * xsec[i, :]
-     return yy
-
-
-
- def fit_edges(spectrum, energy_scale, region_tags, edges):
-     """fit edges for quantification"""
-
-     # Determine fitting ranges and masks to exclude ranges
-     mask = np.ones(len(spectrum))
-
-     background_fit_end = energy_scale[-1]
-     for key in region_tags:
-         end = region_tags[key]['start_x'] + region_tags[key]['width_x']
-
-         startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
-         endx = np.searchsorted(energy_scale, end)
-
-         if key == 'fit_area':
-             mask[0:startx] = 0.0
-             mask[endx:-1] = 0.0
-         else:
-             mask[startx:endx] = 0.0
-             if region_tags[key]['start_x'] < background_fit_end:  # which is the onset of the first edge?
-                 background_fit_end = region_tags[key]['start_x']
-
-     ########################
-     # Background Fit
-     ########################
-     bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
-     background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
-
-     #######################
-     # Edge Fit
-     #######################
-     x = energy_scale
-     blurred = gaussian_filter(spectrum, sigma=5)
-
-     y = blurred  # now in probability
-     y[np.where(y < 1e-8)] = 1e-8
-
-     xsec = []
-     number_of_edges = 0
-     for key in edges:
-         if key.isdigit():
-             xsec.append(edges[key]['data'])
-             number_of_edges += 1
-     xsec = np.array(xsec)
-
-     def model(xx, pp):
-         yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
-         for i in range(number_of_edges):
-             pp[i] = np.abs(pp[i])
-             yy = yy + pp[i] * xsec[i, :]
-         return yy
-
-     def residuals(pp, xx, yy):
-         err = np.abs((yy - model(xx, pp)) * mask)  # / np.sqrt(np.abs(y))
-         return err
-
-     scale = y[100]
-     pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
-     [p, _] = leastsq(residuals, pin, args=(x, y))
-
-     for key in edges:
-         if key.isdigit():
-             edges[key]['areal_density'] = p[int(key) - 1]
-
-     edges['model'] = {}
-     edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
-     edges['model']['background-poly_0'] = p[6]
-     edges['model']['background-poly_1'] = p[7]
-     edges['model']['background-poly_2'] = p[8]
-     edges['model']['background-A'] = A
-     edges['model']['background-r'] = r
-     edges['model']['spectrum'] = model(x, p)
-     edges['model']['blurred'] = blurred
-     edges['model']['mask'] = mask
-     edges['model']['fit_parameter'] = p
-     edges['model']['fit_area_start'] = region_tags['fit_area']['start_x']
-     edges['model']['fit_area_end'] = region_tags['fit_area']['start_x'] + region_tags['fit_area']['width_x']
-
-     return edges
-
-
-
- def get_spectrum(dataset, x=0, y=0, bin_x=1, bin_y=1):
-     """Get a spectrum from a spectrum or spectrum image
-
-     Parameters
-     ----------
-     dataset: sidpy.Dataset object
-         contains spectrum or spectrum image
-     x: int, default = 0
-         x position in spectrum image
-     y: int, default = 0
-         y position in spectrum image
-     bin_x: int, default = 1
-         binning of spectrum image in x-direction
-     bin_y: int, default = 1
-         binning of spectrum image in y-direction
-
-     Returns
-     -------
-     spectrum: sidpy.Dataset object
-     """
-     if dataset.data_type.name == 'SPECTRUM':
-         spectrum = dataset.copy()
-     else:
-         image_dims = dataset.get_image_dims()
-         if x > dataset.shape[image_dims[0]] - bin_x:
-             x = dataset.shape[image_dims[0]] - bin_x
-         if y > dataset.shape[image_dims[1]] - bin_y:
-             y = dataset.shape[image_dims[1]] - bin_y
-         selection = []
-         dimensions = dataset.get_dimension_types()
-         for dim, dimension_type in enumerate(dimensions):
-             if dimension_type == 'SPATIAL':
-                 if dim == image_dims[0]:
-                     selection.append(slice(x, x + bin_x))
-                 else:
-                     selection.append(slice(y, y + bin_y))
-             elif dimension_type == 'SPECTRAL':
-                 selection.append(slice(None))
-             elif dimension_type == 'CHANNEL':
-                 selection.append(slice(None))
-             else:
-                 selection.append(slice(0, 1))
-
-         spectrum = dataset[tuple(selection)].mean(axis=tuple(image_dims))
-     spectrum.squeeze().compute()
-     spectrum.data_type = 'Spectrum'
-     return spectrum
-
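The selection logic reduces to building a tuple of slices and averaging over the spatial axes; a plain-numpy sketch of the same step for a (32, 32, 1024) spectrum image:

```python
import numpy as np

data = np.random.default_rng(2).poisson(5., (32, 32, 1024)).astype(float)
x, y, bin_x, bin_y = 10, 12, 2, 2

selection = (slice(x, x + bin_x), slice(y, y + bin_y), slice(None))
spectrum = data[selection].mean(axis=(0, 1))     # average over the binned pixels
print(spectrum.shape)                            # (1024,)
```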
- def find_peaks(dataset, energy_scale):
-     """find peaks in a spectrum"""
-
-     peaks, prop = scipy.signal.find_peaks(np.abs(dataset) + 1, width=5)
-     results_half = scipy.signal.peak_widths(np.abs(dataset) + 1, peaks, rel_height=0.5)[0]
-
-     disp = energy_scale[1] - energy_scale[0]
-     if len(peaks) > 0:
-         p_in = np.ravel([[energy_scale[peaks[i]], dataset[peaks[i]], results_half[i] * disp] for i in range(len(peaks))])
-         return p_in
-
- def nothing():
-     pass
-
- """
- if dataset.data_type.name == 'SPECTRAL_IMAGE':
-     if hasattr(dataset.view, 'get_spectrum'):
-         spectrum = dataset.view.get_spectrum()
-     else:
-         spectrum = np.array(dataset[0, 0])
- else:
-     spectrum = np.array(dataset)
-
- energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
- """
-
- """
- start_channel = np.searchsorted(energy_scale, fit_start)
- end_channel = np.searchsorted(energy_scale, fit_end)
- peaks = []
- for index in indices:
-     if start_channel < index < end_channel:
-         peaks.append(index - start_channel)
-
- if 'model' in dataset.metadata:
-     model = dataset.metadata['model']
-
- elif energy_scale[0] > 0:
-     if 'edges' not in dataset.metadata:
-         return
-     if 'model' not in dataset.metadata['edges']:
-         return
-     model = dataset.metadata['edges']['model']['spectrum']
-
- else:
-     model = np.zeros(len(energy_scale))
-
- energy_scale = energy_scale[start_channel:end_channel]
-
- difference = np.array(spectrum - model)[start_channel:end_channel]
- fit = np.zeros(len(energy_scale))
- if len(peaks) > 0:
-     p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
- """
-
-
-
- def find_maxima(y, number_of_peaks):
-     """find the most prominent peaks
-
-     peaks are then sorted by energy
-
-     Parameters
-     ----------
-     y: numpy array
-         (part of) spectrum
-     number_of_peaks: int
-         number of peaks to return
-
-     Returns
-     -------
-     numpy array
-         indices of peaks
-     """
-     blurred2 = gaussian_filter(y, sigma=2)
-     peaks, _ = scipy.signal.find_peaks(blurred2)
-     prominences = peak_prominences(blurred2, peaks)[0]
-     prominences_sorted = np.argsort(prominences)
-     peaks = peaks[prominences_sorted[-number_of_peaks:]]
-
-     peak_indices = np.argsort(peaks)
-     return peaks[peak_indices]
-
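scipy's peak prominences make "most prominent first" easy; the function above then sorts the chosen peaks back into index order before returning. A self-contained sketch:

```python
import numpy as np
import scipy.signal
from scipy.ndimage import gaussian_filter
from scipy.signal import peak_prominences

x = np.linspace(0., 10., 1000)
y = (np.exp(-(x - 3.)**2 / 0.02) + 3. * np.exp(-(x - 6.)**2 / 0.02)
     + 0.5 * np.exp(-(x - 8.)**2 / 0.02))

blurred = gaussian_filter(y, sigma=2)
peaks, _ = scipy.signal.find_peaks(blurred)
prominences = peak_prominences(blurred, peaks)[0]
strongest_two = peaks[np.argsort(prominences)[-2:]]   # keep the two most prominent
print(np.sort(x[strongest_two]))                      # ~[3.0, 6.0], back in energy order
```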
- def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
-     """model for fitting a low-loss spectrum"""
-     if pin is None:
-         pin = p
-
-     # if len([restrict_pos]) == 1:
-     #     restrict_pos = [restrict_pos] * number_of_peaks
-     # if len([restrict_width]) == 1:
-     #     restrict_width = [restrict_width] * number_of_peaks
-     y = np.zeros(len(x))
-
-     for i in range(number_of_peaks):
-         index = int(i * 3)
-         if restrict_pos > 0:
-             if p[index] > pin[index] * (1.0 + restrict_pos):
-                 p[index] = pin[index] * (1.0 + restrict_pos)
-             if p[index] < pin[index] * (1.0 - restrict_pos):
-                 p[index] = pin[index] * (1.0 - restrict_pos)
-
-         p[index + 1] = abs(p[index + 1])
-         p[index + 2] = abs(p[index + 2])
-         if restrict_width > 0:
-             if p[index + 2] > pin[index + 2] * (1.0 + restrict_width):
-                 p[index + 2] = pin[index + 2] * (1.0 + restrict_width)
-
-         if peak_shape[i] == 'Lorentzian':
-             y = y + lorentz(x, p[index:])
-         elif peak_shape[i] == 'zl':
-             y = y + zl(x, p[index:], p_zl)
-         else:
-             y = y + gauss(x, p[index:])
-     return y
-
-
- def sort_peaks(p, peak_shape):
-     """sort fitting parameters by peak position"""
-     number_of_peaks = int(len(p) / 3)
-     p3 = np.reshape(p, (number_of_peaks, 3))
-     sort_pin = np.argsort(p3[:, 0])
-
-     p = p3[sort_pin].flatten()
-     peak_shape = np.array(peak_shape)[sort_pin].tolist()
-
-     return p, peak_shape
-
-
- def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
-     """add peaks to the fitting parameters"""
-     if pin_in is None:
-         return
-     if peak_shape_in is None:
-         return
-
-     pin = pin_in.copy()
-
-     peak_shape = peak_shape_in.copy()
-     if isinstance(shape, str):  # if shape is only a string, make a list of it
-         shape = [shape]
-
-     if len(shape) == 1:
-         shape = shape * len(peaks)
-     for i, peak in enumerate(peaks):
-         pin.append(x[peak])
-         pin.append(y[peak])
-         pin.append(.3)
-         peak_shape.append(shape[i])
-
-     return pin, peak_shape
-
- @jit
- def gauss(x, p):  # p[0]==mean, p[1]==amplitude, p[2]==fwhm
-     """Gaussian function
-
-     p[0]==mean, p[1]==amplitude, p[2]==fwhm
-     area = np.sqrt(2 * np.pi) * p[1] * np.abs(p[2] / 2.3548)
-     FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
-     sigma = FWHM / 2.3548
-     """
-     if p[2] == 0:
-         return x * 0.
-     else:
-         return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
-
-
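The 2.3548 constant is just 2*sqrt(2 ln 2), the FWHM-to-sigma conversion for a Gaussian; a two-line check:

```python
import numpy as np

print(2 * np.sqrt(2 * np.log(2)))         # 2.3548...
sigma = 1.0 / 2.3548                      # sigma of a Gaussian with FWHM = 1
print(np.sqrt(2 * np.pi) * 1.0 * sigma)   # area of gauss(x, [mean, 1.0, 1.0]), ~1.064
```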
- @jit
- def gmm(x, p):
-     """Gaussian mixture model: sum of all Gaussians described by the parameter vector p"""
-     y = np.zeros(len(x))
-     number_of_peaks = int(len(p) / 3)
-     for i in range(number_of_peaks):
-         index = i * 3
-         p[index + 2] = abs(p[index + 2])
-         y = y + gauss(x, p[index:index + 3])
-     return y
-
-
- @jit
- def residuals3(pp, xx, yy):
-     """residuals of the Gaussian mixture model"""
-     err = (yy - gmm(xx, pp))
-     return err
-
- def gaussian_mixture_model(dataset, p_in=None):
-     """fit a Gaussian mixture model to a spectrum, or to the current spectrum of a spectral image"""
-     if isinstance(dataset, sidpy.Dataset):
-         if dataset.data_type.name == 'SPECTRAL_IMAGE':
-             if hasattr(dataset.view, 'get_spectrum'):
-                 spectrum = dataset.view.get_spectrum()
-             else:
-                 spectrum = dataset[0, 0]
-             spectrum.data_type = 'SPECTRUM'
-         else:
-             spectrum = dataset
-             spectrum.data_type = 'SPECTRUM'
-         energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
-     else:
-         spectrum = np.array(dataset)
-         energy_scale = np.arange(len(spectrum))
-     spectrum = np.array(spectrum)
-     if p_in is None:
-         p_in = find_peaks(spectrum, energy_scale)
-
-     p = fit_gmm(energy_scale, np.array(spectrum), list(p_in))
-
-     peak_model = gmm(energy_scale, p)
-     return peak_model, p
-
-
- def fit_gmm(x, y, pin):
-     """fit a Gaussian mixture model to a spectrum"""
-     [p, _] = leastsq(residuals3, pin, args=(x, y), maxfev=10000)
-     return p
-
-
- def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
-     """fit a peak model to a low-loss spectrum"""
-
-     pin_original = pin.copy()
-
-     [p, _] = scipy.optimize.leastsq(residuals3, pin, args=(x, y), maxfev=19400)
-     # p2 = p.tolist()
-     # p3 = np.reshape(p2, (number_of_peaks, 3))
-     # sort_pin = np.argsort(p3[:, 0])
-     # p = p3[sort_pin].flatten()
-     # peak_shape = np.array(peak_shape)[sort_pin].tolist()
-
-     return p, peak_shape
-
-
-
- def plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, es=0, ibt=[]):
-     """Plot loss function"""
-
-     [x, y] = np.meshgrid(e_data + 1e-12, a_data[1024:2048] * 1000)
-
-     z = plotdata
-     lev = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 3, 4, 4.9]) * max_p / 5
-
-     wavelength = get_wave_length(ee)
-     q = a_data[1024:2048] / (wavelength * 1e9)  # in [1/nm]
-     scale = np.array([0, a_data[-1], e_data[0], e_data[-1]])
-     ev2hertz = constants.value('electron volt-hertz relationship')
-
-     if units[0] == 'mrad':
-         units[0] = 'scattering angle [mrad]'
-         scale[1] = scale[1] * 1000.
-         light_line = constants.c * a_data  # for mrad
-     elif units[0] == '1/nm':
-         units[0] = 'scattering vector [1/nm]'
-         scale[1] = scale[1] / (wavelength * 1e9)
-         light_line = 1 / (constants.c / ev2hertz) * 1e-9
-
-     if units[1] == 'eV':
-         units[1] = 'energy loss [eV]'
-
-     if units[2] == 'ppm':
-         units[2] = 'probability [ppm]'
-     if units[2] == '1/eV':
-         units[2] = 'probability [eV$^{-1}$ srad$^{-1}$]'
-
-     alpha = 3. / 5. * ef / ep
-
-     ax2 = plt.gca()
-     fig2 = plt.gcf()
-     im = ax2.imshow(z.T, clim=(0, max_p), origin='lower', aspect='auto', extent=scale)
-     co = ax2.contour(y, x, z, levels=lev, colors='k', origin='lower')
-     # extent=(-ang * 1000., ang * 1000., e_data[0], e_data[-1]))  # , vmin=p_vol.min(), vmax=1000
-
-     fig2.colorbar(im, ax=ax2, label=units[2])
-
-     ax2.plot(a_data, light_line, c='r', label='light line')
-     # ax2.plot(e_data * light_line * np.sqrt(np.real(eps_data)), e_data, color='steelblue',
-     #          label='$\\omega = c q \\sqrt{\\epsilon_2}$')
-
-     # ax2.plot(q, Ep_disp, c='r')
-     ax2.plot([11.5 * light_line, 0.12], [11.5, 11.5], c='r')
-
-     ax2.text(.05, 11.7, 'surface plasmon', color='r')
-     ax2.plot([0.0, 0.12], [16.8, 16.8], c='r')
-     ax2.text(.05, 17, 'volume plasmon', color='r')
-     ax2.set_xlim(0, scale[1])
-     ax2.set_ylim(0, 20)
-     # interband transitions
-     ax2.plot([0.0, 0.25], [4.2, 4.2], c='g', label='interband transitions')
-     ax2.plot([0.0, 0.25], [5.2, 5.2], c='g')
-     ax2.set_ylabel(units[1])
-     ax2.set_xlabel(units[0])
-     ax2.legend(loc='lower right')
-
-
- def xsec_xrpa(energy_scale, e0, z, beta, shift=0):
-     """Calculate the momentum-integrated cross-section for EELS from X-ray photo-absorption cross-sections.
-
-     X-ray photo-absorption cross-sections from NIST.
-     Momentum-integrated cross-section for EELS according to Egerton, Ultramicroscopy 50 (1993) 13-28, equation (4).
-
-     Parameters
-     ----------
-     energy_scale: numpy array
-         energy scale of spectrum to be analyzed
-     e0: float
-         acceleration voltage in keV
-     z: int
-         atomic number of element
-     beta: float
-         effective collection angle in mrad
-     shift: float
-         chemical shift of edge in eV
-     """
-     beta = beta * 0.001  # collection half angle theta [rad]
-     dispersion = energy_scale[1] - energy_scale[0]
-
-     x_sections = get_x_sections(z)
-     enexs = x_sections['ene']
-     datxs = x_sections['dat']
-
-     #####
-     # Cross-section according to Egerton, Ultramicroscopy 50 (1993) 13-28, equation (4)
-     #####
-
-     # relativistic correction factors
-     t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06) ** 2) / 2.0
-     gamma = 1 + e0 / 511.06
-     a = 6.5  # e-14 *10**14
-     b = beta
-
-     theta_e = enexs / (2 * gamma * t)
-
-     g = 2 * np.log(gamma) - np.log((b ** 2 + theta_e ** 2) / (b ** 2 + theta_e ** 2 / gamma ** 2)) - (
-         gamma - 1) * b ** 2 / (b ** 2 + theta_e ** 2 / gamma ** 2)
-     datxs = datxs * (a / enexs / t) * (np.log(1 + b ** 2 / theta_e ** 2) + g) / 1e8
-
-     datxs = datxs * dispersion  # from per eV to per dispersion
-     xsec = np.zeros(len(energy_scale))
-     lin = interp1d(enexs, datxs, kind='linear')  # linear instead of spline interpolation to avoid oscillations
-     if energy_scale[0] < enexs[0]:
-         start = np.searchsorted(energy_scale, enexs[0]) + 1
-     else:
-         start = 0
-     xsec[start:] = lin(energy_scale[start:] - shift)
-
-     return xsec
-
-
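For orientation, the relativistic factors above evaluated at 200 kV and a 500 eV energy loss (same formulas as in the function, numbers purely illustrative):

```python
import numpy as np

e0 = 200.                                                     # keV
t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06)**2) / 2.0     # effective kinetic term, eV
gamma = 1 + e0 / 511.06
theta_e = 500.0 / (2 * gamma * t)                             # characteristic angle [rad]
print(f'gamma = {gamma:.3f}, theta_E = {theta_e * 1000:.2f} mrad')   # ~1.39, ~1.5 mrad
```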
-
- ##########################
- # EELS Database
- ##########################
-
- def read_msa(msa_string):
-     """read an MSA-formatted file"""
-     parameters = {}
-     y = []
-     x = []
-     # read the keywords
-     data_section = False
-     msa_lines = msa_string.split('\n')
-
-     for line in msa_lines:
-         if data_section is False:
-             if len(line) > 0:
-                 if line[0] == "#":
-                     try:
-                         key, value = line.split(': ')
-                         value = value.strip()
-                     except ValueError:
-                         key = line
-                         value = None
-                     key = key.strip('#').strip()
-
-                     if key != 'SPECTRUM':
-                         parameters[key] = value
-                     else:
-                         data_section = True
-         else:
-             # read the data
-             if len(line) > 0 and line[0] != "#" and line.strip():
-                 if parameters['DATATYPE'] == 'XY':
-                     xy = line.replace(',', ' ').strip().split()
-                     y.append(float(xy[1]))
-                     x.append(float(xy[0]))
-                 elif parameters['DATATYPE'] == 'Y':
-                     data = [float(i) for i in line.replace(',', ' ').strip().split()]
-                     y.extend(data)
-     parameters['data'] = np.array(y)
-     if 'XPERCHAN' in parameters:
-         parameters['XPERCHAN'] = str(parameters['XPERCHAN']).split(' ')[0]
-         parameters['OFFSET'] = str(parameters['OFFSET']).split(' ')[0]
-         parameters['energy_scale'] = np.arange(len(y)) * float(parameters['XPERCHAN']) + float(parameters['OFFSET'])
-     return parameters
-
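read_msa can be exercised with an inline string; the header below is a minimal hand-written EMSA/MAS example, not a real database record:

```python
msa_string = """#FORMAT : EMSA/MAS Spectral Data File
#VERSION : 1.0
#DATATYPE : XY
#XPERCHAN : 0.5
#OFFSET : 400.0
#SPECTRUM : Spectral Data Starts Here
400.0, 1200.0
400.5, 1180.0
401.0, 1175.0
"""
parameters = read_msa(msa_string)
print(parameters['DATATYPE'])        # 'XY'
print(parameters['energy_scale'])    # [400.  400.5 401. ]
print(parameters['data'])            # [1200. 1180. 1175.]
```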
-
- def get_spectrum_eels_db(formula=None, edge=None, title=None, element=None):
-     """
-     Get spectra from the EELS database (eelsdb.eu).
-     A chemical formula and an edge are accepted.
-     Could expose more of the search parameters.
-     """
-     valid_edges = ['K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5']
-     if edge is not None and edge not in valid_edges:
-         print('edge should be one of ', valid_edges)
-
-     spectrum_type = None
-     author = None
-     min_energy = None
-     max_energy = None
-     resolution = None
-     min_energy_compare = "gt"
-     max_energy_compare = "lt"
-     resolution_compare = "lt"
-     max_n = -1
-     monochromated = None
-     order = None
-     order_direction = "ASC"
-     verify_certificate = True
-
-     # verify arguments
-     if spectrum_type is not None and spectrum_type not in {'coreloss', 'lowloss', 'zeroloss', 'xrayabs'}:
-         raise ValueError("spectrum_type must be one of 'coreloss', 'lowloss', 'zeroloss', 'xrayabs'.")
-
-     params = {
-         "type": spectrum_type,
-         "title": title,
-         "author": author,
-         "edge": edge,
-         "min_energy": min_energy,
-         "max_energy": max_energy,
-         "resolution": resolution,
-         "resolution_compare": resolution_compare,
-         "monochromated": monochromated,
-         "formula": formula,
-         "element": element,
-         "min_energy_compare": min_energy_compare,
-         "max_energy_compare": max_energy_compare,
-         "per_page": max_n,
-         "order": order,
-         "order_direction": order_direction,
-     }
-
-     request = requests.get('http://api.eelsdb.eu/spectra', params=params, verify=True)
-     jsons = request.json()
-     if "message" in jsons:
-         # invalid query; the EELS database raises an error
-         raise IOError(
-             "Please report the following error to the HyperSpy developers: "
-             "%s" % jsons["message"])
-     reference_spectra = {}
-     for json_spectrum in jsons:
-         download_link = json_spectrum['download_link']
-         msa_string = requests.get(download_link, verify=verify_certificate).text
-         parameters = read_msa(msa_string)
-         if 'XPERCHAN' in parameters:
-             reference_spectra[parameters['TITLE']] = parameters
-             print(parameters['TITLE'])
-     print(f'found {len(reference_spectra)} spectra in the EELS database')
-
-     return reference_spectra
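A hedged usage sketch (requires network access to the EELS database; the query simply mirrors the parameters above):

```python
reference_spectra = get_spectrum_eels_db(formula='BN', edge='K')
for title, parameters in reference_spectra.items():
    energy = parameters['energy_scale']
    print(f"{title}: {energy[0]:.1f} - {energy[-1]:.1f} eV, {len(parameters['data'])} channels")
```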