pyTEMlib 0.2023.8.0__py2.py3-none-any.whl → 0.2024.2.0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


pyTEMlib/eels_tools.py CHANGED
@@ -17,31 +17,34 @@ Usage:
 
     All the input and output is done through a dictionary which is to be found in the meta_data
     attribute of the sidpy.Dataset
+
+    Update by Austin Houston, UTK 12-2023 : Parallelization of spectrum images
  """
+ import typing
+ from typing import Union
  import numpy as np
+ import matplotlib.pyplot as plt
 
  import scipy
- from scipy.interpolate import interp1d, splrep  # splev, splint
+ from scipy import constants
  from scipy import interpolate
+ from scipy.interpolate import interp1d, splrep
  from scipy.signal import peak_prominences
  from scipy.ndimage import gaussian_filter
-
- from scipy import constants
- import matplotlib.pyplot as plt
+ from scipy.optimize import curve_fit, leastsq
 
  import requests
 
- from scipy.optimize import leastsq  # least square fitting routine for scipy
-
- import pickle  # pkg_resources,
-
  # ## And we use the image tool library of pyTEMlib
- import pyTEMlib.file_tools as ft
  from pyTEMlib.xrpa_x_sections import x_sections
 
  import sidpy
+ from sidpy.proc.fitter import SidFitter
  from sidpy.base.num_utils import get_slope
 
+ # we have a function called find_peaks - is it necessary?
+ # or could we just use scipy.signal import find_peaks
+
  major_edges = ['K1', 'L3', 'M5', 'N5']
  all_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',
              'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']
@@ -65,32 +68,154 @@ elements = [' ', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
  # drude(ep, eb, gamma, e)
  # drude_lorentz(epsInf,leng, ep, eb, gamma, e, Amplitude)
  # zl_func( p, x)
+ # ###############################################################
+ # Utility Functions
+ # ################################################################
+
+ def get_wave_length(e0):
+     """get de Broglie wavelength of electron accelerated by energy (in eV) e0"""
+
+     ev = constants.e * e0
+     return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
+
+
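
get_wave_length implements the relativistically corrected de Broglie wavelength, lambda = h / sqrt(2 m0 eV (1 + eV / (2 m0 c^2))). A minimal usage sketch (editor's illustration, not part of the diff; 200 kV chosen arbitrarily):

    import pyTEMlib.eels_tools as eels

    wavelength = eels.get_wave_length(200000)   # acceleration energy in eV
    print(f'{wavelength * 1e12:.2f} pm')        # roughly 2.51 pm at 200 kV
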
+ def effective_collection_angle(energy_scale, alpha, beta, beam_kv):
+     """Calculates the effective collection angle in mrad:
+
+     Translated from the original Fortran program
+     Calculates the effective collection angle in mrad:
+     Parameter
+     ---------
+     energy_scale: numpy array
+         first and last energy loss of spectrum in eV
+     alpha: float
+         convergence angle in mrad
+     beta: float
+         collection angle in mrad
+     beam_kv: float
+         acceleration voltage in V
+
+     Returns
+     -------
+     eff_beta: float
+         effective collection angle in mrad
+
+     # function y = effbeta(ene, alpha, beta, beam_kv)
+     #
+     #   This program computes etha(alpha,beta), that is the collection
+     #   efficiency associated to the following geometry :
+     #
+     #   alpha = half angle of illumination (0 -> pi/2)
+     #   beta = half angle of collection (0 -> pi/2)
+     #          (pi/2 = 1570.795 mrad)
+     #
+     #   A constant angular distribution of incident electrons is assumed
+     #   for any incident angle (-alpha,alpha). These electrons imping the
+     #   target and a single energy-loss event occurs, with a characteristic
+     #   angle theta-e (relativistic). The angular distribution of the
+     #   electrons after the target is analytically derived.
+     #   This program integrates this distribution from theta=0 up to
+     #   theta=beta with an adjustable angular step.
+     #   This program also computes beta* which is the theoretical
+     #   collection angle which would give the same value of etha(alpha,beta)
+     #   with a parallel incident beam.
+     #
+     #   subroutines and function subprograms required
+     #   ---------------------------------------------
+     #   none
+     #
+     #   comments
+     #   --------
+     #
+     #   The following parameters are asked as input :
+     #   accelerating voltage (kV), energy loss range (eV) for the study,
+     #   energy loss step (eV) in this range, alpha (mrad), beta (mrad).
+     #   The program returns for each energy loss step :
+     #   alpha (mrad), beta (mrad), theta-e (relativistic) (mrad),
+     #   energy loss (eV), etha (#), beta * (mrad)
+     #
+     #   author :
+     #   --------
+     #   Pierre TREBBIA
+     #   US 41 : "Microscopie Electronique Analytique Quantitative"
+     #   Laboratoire de Physique des Solides, Bat. 510
+     #   Universite Paris-Sud, F91405 ORSAY Cedex
+     #   Phone : (33-1) 69 41 53 68
+     #
+     """
+     if beam_kv == 0:
+         beam_kv = 100.0
+
+     if alpha == 0:
+         return beta
+
+     if beta == 0:
+         return alpha
+
+     z1 = beam_kv  # eV
+     z2 = energy_scale[0]
+     z3 = energy_scale[-1]
+     z4 = 100.0
+
+     z5 = alpha * 0.001  # rad
+     z6 = beta * 0.001  # rad
+     z7 = 500.0  # number of integration steps to be modified at will
+
+     # main loop on energy loss
+     #
+     for zx in range(int(z2), int(z3), int(z4)):  # ! zx = current energy loss
+         eta = 0.0
+         x0 = float(zx) * (z1 + 511060.) / (z1 * (z1 + 1022120.))  # x0 = relativistic theta-e
+         x1 = np.pi / (2. * x0)
+         x2 = x0 * x0 + z5 * z5
+         x3 = z5 / x0 * z5 / x0
+         x4 = 0.1 * np.sqrt(x2)
+         dtheta = (z6 - x4) / z7
+         #
+         # calculation of the analytical expression
+         #
+         for zi in range(1, int(z7)):
+             theta = x4 + dtheta * float(zi)
+             x5 = theta * theta
+             x6 = 4. * x5 * x0 * x0
+             x7 = x2 - x5
+             x8 = np.sqrt(x7 * x7 + x6)
+             x9 = (x8 + x7) / (2. * x0 * x0)
+             x10 = 2. * theta * dtheta * np.log(x9)
+             eta = eta + x10
+
+         eta = eta + x2 / 100. * np.log(1. + x3)  # addition of the central contribution
+         x4 = z5 * z5 * np.log(1. + x1 * x1)  # normalisation
+         eta = eta / x4
+         #
+         # correction by geometrical factor (beta/alpha)**2
+         #
+         if z6 < z5:
+             x5 = z5 / z6
+             eta = eta * x5 * x5
+
+         etha2 = eta * 100.
+         #
+         # calculation of beta *
+         #
+         x6 = np.power((1. + x1 * x1), eta)
+         x7 = x0 * np.sqrt(x6 - 1.)
+         beta = x7 * 1000.  # in mrad
+
+     return beta
+
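
A hedged sketch of calling effective_collection_angle; the numbers are illustrative only, and only the first and last values of the energy scale matter:

    import numpy as np
    import pyTEMlib.eels_tools as eels

    energy_scale = np.arange(100., 1000.)   # eV
    eff_beta = eels.effective_collection_angle(energy_scale, alpha=10., beta=30.,
                                               beam_kv=200000.)   # mrad, mrad, V
    print(f'effective collection angle: {eff_beta:.1f} mrad')
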
+
+ def set_default_metadata(current_dataset: sidpy.Dataset) -> None:
+
+     if 'experiment' not in current_dataset.metadata:
+         current_dataset.metadata['experiment'] = {}
+     if 'convergence_angle' not in current_dataset.metadata['experiment']:
+         current_dataset.metadata['experiment']['convergence_angle'] = 30
+     if 'collection_angle' not in current_dataset.metadata['experiment']:
+         current_dataset.metadata['experiment']['collection_angle'] = 50
+     if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
+         current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
 
- ###
- def set_previous_quantification(current_dataset):
-     """Set previous quantification from a sidpy.Dataset"""
-
-     current_channel = current_dataset.h5_dataset.parent
-     found_metadata = False
-     for key in current_channel:
-         if 'Log' in key:
-             if current_channel[key]['analysis'][()] == 'EELS_quantification':
-                 current_dataset.metadata.update(current_channel[key].attrs)  # ToDo: find red dictionary
-                 found_metadata = True
-                 print('found previous quantification')
-
-     if not found_metadata:
-         # setting important experimental parameter
-         current_dataset.metadata['experiment'] = ft.read_dm3_info(current_dataset.original_metadata)
-
-         if 'experiment' not in current_dataset.metadata:
-             current_dataset.metadata['experiment'] = {}
-         if 'convergence_angle' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['convergence_angle'] = 30
-         if 'collection_angle' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['collection_angle'] = 50
-         if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
 
  ###
 
  # ###############################################################
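
set_default_metadata only fills in missing keys (30 mrad convergence, 50 mrad collection, 200 kV), so existing experiment metadata is preserved. A minimal sketch, assuming a sidpy dataset created elsewhere:

    import numpy as np
    import sidpy
    import pyTEMlib.eels_tools as eels

    dataset = sidpy.Dataset.from_array(np.zeros(2048))
    eels.set_default_metadata(dataset)
    print(dataset.metadata['experiment'])
    # -> {'convergence_angle': 30, 'collection_angle': 50, 'acceleration_voltage': 200000}
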
@@ -123,1859 +248,1677 @@ def model_smooth(x, p, only_positive_intensity=False):
      return y
 
 
- def residuals_ll(p, x, y, only_positive_intensity):
-     """part of fit"""
+ def gauss(x, p):  # p[0]==mean, p[1]= amplitude p[2]==fwhm,
+     """Gaussian Function
 
-     err = (y - model_ll(x, p, only_positive_intensity)) / np.sqrt(np.abs(y))
-     return err
+     p[0]==mean, p[1]= amplitude p[2]==fwhm
+     area = np.sqrt(2 * np.pi) * p[1] * np.abs(p[2] / 2.3548)
+     FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
+     sigma = FWHM / 2.3548
+     """
+     if p[2] == 0:
+         return x * 0.
+     else:
+         return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
 
 
- def residuals_ll2(p, x, y, only_positive_intensity):
-     """part of fit"""
+ def lorentz(x, center, amplitude, width):
+     """ Lorentzian Function """
+     lorentz_peak = 0.5 * width / np.pi / ((x - center) ** 2 + (width / 2) ** 2)
+     return amplitude * lorentz_peak / lorentz_peak.max()
 
-     err = (y - model_ll(x, p, only_positive_intensity))
-     return err
 
+ def zl_func(x, center1, amplitude1, width1, center2, amplitude2, width2):
+     """ zero loss function as product of two lorentzians """
+     return lorentz(x, center1, amplitude1, width1) * lorentz(x, center2, amplitude2, width2)
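
The new zero-loss model is the product of two Lorentzians, which yields a slightly asymmetric peak. A quick evaluation sketch with made-up parameters:

    import numpy as np
    import pyTEMlib.eels_tools as eels

    x = np.linspace(-2, 2, 401)                           # energy loss in eV
    zlp = eels.zl_func(x, 0.0, 1.0, 0.3, 0.1, 1.0, 0.5)   # centers, amplitudes, widths
    print(x[np.argmax(zlp)])                              # peak lies between the two centers
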
 
- def model_ll(x, p, only_positive_intensity):
-     """part of fit"""
 
-     y = np.zeros(len(x))
+ def zl(x, p, p_zl):
+     """zero-loss function"""
+     p_zl_local = p_zl.copy()
+     p_zl_local[2] += p[0]
+     p_zl_local[5] += p[0]
+     zero_loss = zl_func(x, *p_zl_local)
+     return p[1] * zero_loss / zero_loss.max()
 
-     number_of_peaks = int(len(p) / 3)
-     for i in range(number_of_peaks):
-         if only_positive_intensity:
-             p[i * 3 + 1] = abs(p[i * 3 + 1])
-         p[i * 3 + 2] = abs(p[i * 3 + 2])
-         if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
-             p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2.  # ## width cannot extend beyond zero, maximum is FWTM/2
 
-         y = y + gauss(x, p[i * 3:])
+ def get_channel_zero(spectrum: np.ndarray, energy: np.ndarray, width: int = 8):
+     """Determine shift of energy scale according to zero-loss peak position
+
+     This function assumes that the zero loss peak is the maximum of the spectrum.
+     """
 
-     return y
+     zero = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
+     width = int(width/2)
+     x = np.array(energy[int(zero-width):int(zero+width)])
+     y = np.array(spectrum[int(zero-width):int(zero+width)]).copy()
 
+     y[np.nonzero(y <= 0)] = 1e-12
 
- def fit_peaks(spectrum, energy_scale, pin, start_fit, end_fit, only_positive_intensity=False):
-     """fit peaks to spectrum
+     p0 = [energy[zero], spectrum.max(), .5]  # Initial guess is a normal distribution
 
-     Parameters
-     ----------
-     spectrum: numpy array
-         spectrum to be fitted
-     energy_scale: numpy array
-         energy scale of spectrum
-     pin: list of float
-         initial guess of peaks position amplitude width
-     start_fit: int
-         channel where fit starts
-     end_fit: int
-         channel where fit ends
-     only_positive_intensity: boolean
-         allows only for positive amplitudes if True; default = False
+     def errfunc(pp, xx, yy):
+         return (gauss(xx, pp) - yy) / np.sqrt(yy)  # Distance to the target function
+
+     [p1, _] = leastsq(errfunc, np.array(p0[:]), args=(x, y))
+     fit_mu, area, fwhm = p1
 
-     Returns
-     -------
-     p: list of float
-         fitting parameters
-     """
+     return fwhm, fit_mu
 
-     # TODO: remove zero_loss_fit_width add absolute
 
-     fit_energy = energy_scale[start_fit:end_fit]
-     spectrum = np.array(spectrum)
-     fit_spectrum = spectrum[start_fit:end_fit]
+ def get_zero_loss_energy(dataset):
 
-     pin_flat = [item for sublist in pin for item in sublist]
-     [p_out, _] = leastsq(residuals_ll, np.array(pin_flat), ftol=1e-3, args=(fit_energy, fit_spectrum,
-                                                                            only_positive_intensity))
-     p = []
-     for i in range(len(pin)):
-         if only_positive_intensity:
-             p_out[i * 3 + 1] = abs(p_out[i * 3 + 1])
-         p.append([p_out[i * 3], p_out[i * 3 + 1], abs(p_out[i * 3 + 2])])
-     return p
+     spectrum = dataset.sum(axis=tuple(range(dataset.ndim - 1)))
 
+     startx = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
 
- #################################################################
- # CORE - LOSS functions
- #################################################################
+     end = startx + 3
+     start = startx - 3
+     for i in range(10):
+         if spectrum[startx - i] < 0.3 * spectrum[startx]:
+             start = startx - i
+         if spectrum[startx + i] < 0.3 * spectrum[startx]:
+             end = startx + i
+     if end - start < 3:
+         end = startx + 2
+         start = startx - 2
+     width = int((end-start)/2+0.5)
 
+     energy = dataset.get_spectral_dims(return_axis=True)[0].values
 
- def get_x_sections(z=0):
-     """Reads X-ray fluorescent cross-sections from a pickle file.
+     if dataset.ndim == 1:  # single spectrum
+         _, shifts = get_channel_zero(np.array(dataset), energy, width)
+         shifts = np.array([shifts])
+     elif dataset.ndim == 2:  # line scan
+         shifts = np.zeros(dataset.shape[:1])
+         for x in range(dataset.shape[0]):
+             _, shifts[x] = get_channel_zero(dataset[x, :], energy, width)
+     elif dataset.ndim == 3:  # spectral image
+         shifts = np.zeros(dataset.shape[:2])
+         for x in range(dataset.shape[0]):
+             for y in range(dataset.shape[1]):
+                 _, shifts[x, y] = get_channel_zero(dataset[x, y, :], energy, width)
+     return shifts
+ return shifts
206
342
 
207
- Parameters
208
- ----------
209
- z: int
210
- atomic number if zero all cross-sections will be returned
211
343
 
212
- Returns
213
- -------
214
- dictionary
215
- cross-section of an element or of all elements if z = 0
344
+ def shift_energy(dataset: sidpy.Dataset, shifts: np.ndarray) -> sidpy.Dataset:
345
+ """ Align zero-loss peaks of any spectral sidpy dataset """
216
346
 
217
- """
218
- # pkl_file = open(data_path + '/edges_db.pkl', 'rb')
219
- # x_sections = pickle.load(pkl_file)
220
- # pkl_file.close()
221
- # x_sections = pyTEMlib.config_dir.x_sections
222
- z = int(z)
347
+ new_si = dataset.copy()
348
+ new_si *= 0.0
223
349
 
224
- if z < 1:
225
- return x_sections
226
- else:
227
- z = str(z)
228
- if z in x_sections:
229
- return x_sections[z]
230
- else:
231
- return 0
350
+ image_dims = dataset.get_image_dims()
351
+ if len(image_dims) == 0:
352
+ image_dims =[0]
353
+ if len(image_dims) != shifts.ndim:
354
+ raise TypeError('array of energy shifts have to have same dimension as dataset')
355
+ if not isinstance(dataset, sidpy.Dataset):
356
+ raise TypeError('This function needs a sidpy Dataset to shift energy scale')
357
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
358
+ if dataset.ndim == 1: # single spectrum
359
+ tck = interpolate.splrep(np.array(energy_scale - shifts), np.array(dataset), k=1, s=0)
360
+ new_si[:] = interpolate.splev(energy_scale, tck, der=0)
361
+ new_si.data_type = 'Spectrum'
362
+ elif dataset.ndim == 2: # line scan
363
+ for x in range(dataset.shape[0]):
364
+ tck = interpolate.splrep(np.array(energy_scale - shifts[x]), np.array(dataset[x, :]), k=1, s=0)
365
+ new_si[x, :] = interpolate.splev(energy_scale, tck, der=0)
366
+ elif dataset.ndim == 3: # spectral image
367
+ for x in range(dataset.shape[0]):
368
+ for y in range(dataset.shape[1]):
369
+ tck = interpolate.splrep(np.array(energy_scale - shifts[x, y]), np.array(dataset[x, y]), k=1, s=0)
370
+ new_si[x, y, :] = interpolate.splev(energy_scale, tck, der=0)
232
371
 
372
+ return new_si
233
373
 
234
- def get_z(z):
235
- """Returns the atomic number independent of input as a string or number
236
374
 
237
- Parameter
238
- ---------
239
- z: int, str
240
- atomic number of chemical symbol (0 if not valid)
241
- """
242
- x_sections = get_x_sections()
375
+ def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
243
376
 
244
- z_out = 0
245
- if str(z).isdigit():
246
- z_out = int(z)
247
- elif isinstance(z, str):
248
- for key in x_sections:
249
- if x_sections[key]['name'].lower() == z.lower(): # Well one really should know how to write elemental
250
- z_out = int(key)
251
- return z_out
377
+ shifts = get_zero_loss_energy(dataset)
378
+ print(shifts, dataset)
379
+ new_si = shift_energy(dataset, shifts)
380
+ new_si.metadata.update({'zero_loss': {'shifted': shifts}})
381
+ return new_si
252
382
 
253
383
 
254
- def list_all_edges(z, verbose=False):
255
- """List all ionization edges of an element with atomic number z
384
+ def get_resolution_functions(dset: sidpy.Dataset, startFitEnergy: float=-1, endFitEnergy: float=+1,
385
+ n_workers: int=1, n_threads: int=8):
386
+ """
387
+ Analyze and fit low-loss EELS data within a specified energy range to determine zero-loss peaks.
388
+
389
+ This function processes a low-loss EELS dataset from transmission electron microscopy (TEM) data,
390
+ focusing on a specified energy range for analyzing and fitting the spectrum.
391
+ It determines fitting parameters and applies these to extract zero-loss peak information
392
+ from the dataset. The function handles both 2D and 3D datasets.
393
+
394
+ Parameters:
395
+ dset: sidpy.Dataset
396
+ The dataset containing TEM spectral data.
397
+ startFitEnergy: float
398
+ The start energy of the fitting window.
399
+ endFitEnergy: float
400
+ The end energy of the fitting window.
401
+ n_workers: int, optional
402
+ The number of workers for parallel processing (default is 1).
403
+ n_threads: int, optional
404
+ The number of threads for parallel processing (default is 8).
256
405
 
257
- Parameters
258
- ----------
259
- z: int
260
- atomic number
406
+ Returns:
407
+ tuple: A tuple containing:
408
+ - z_loss_dset (sidpy.Dataset): The dataset with added zero-loss peak information.
409
+ - z_loss_params (numpy.ndarray): Array of parameters used for the zero-loss peak fitting.
261
410
 
262
- Returns
263
- -------
264
- out_string: str
265
- string with all major edges in energy range
411
+ Raises:
412
+ ValueError: If the input dataset does not have the expected dimensions or format.
413
+
414
+ Notes:
415
+ - The function expects `dset` to have specific dimensionalities and will raise an error if they are not met.
416
+ - Parallel processing is employed to enhance performance, particularly for large datasets.
266
417
  """
418
+ energy = dset.get_spectral_dims(return_axis=True)[0].values
419
+ start_fit_pixel = np.searchsorted(energy, startFitEnergy)
420
+ end_fit_pixel = np.searchsorted(energy, endFitEnergy)
421
+ guess_width = (endFitEnergy - startFitEnergy)/2
422
+
423
+ def get_good_guess(zl_func, energy, spectrum):
424
+ popt, pcov = curve_fit(zl_func, energy, spectrum,
425
+ p0=[0, guess_amplitude, guess_width,
426
+ 0, guess_amplitude, guess_width])
427
+ return popt
428
+
429
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
430
+ # get a good guess for the fit parameters
431
+ if len(dset.shape) == 3:
432
+ fit_dset = dset[:, :, start_fit_pixel:end_fit_pixel]
433
+ guess_amplitude = np.sqrt(fit_dset.max())
434
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=(0, 1))/fit_dset.shape[0]/fit_dset.shape[1])
435
+ elif len(dset.shape) == 2:
436
+ fit_dset = dset[:, start_fit_pixel:end_fit_pixel]
437
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
438
+ guess_amplitude = np.sqrt(fit_dset.max())
439
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=0)/fit_dset.shape[0])
440
+ elif len(dset.shape) == 1:
441
+ fit_dset = dset[start_fit_pixel:end_fit_pixel]
442
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
443
+ guess_amplitude = np.sqrt(fit_dset.max())
444
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset)
445
+ z_loss_dset = dset.copy()
446
+ z_loss_dset *= 0.0
447
+ z_loss_dset += zl_func(energy, *guess_params)
448
+ if 'zero_loss' not in z_loss_dset.metadata:
449
+ z_loss_dset.metadata['zero_loss'] = {}
450
+ z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
451
+ 'endFitEnergy': endFitEnergy,
452
+ 'fit_parameter': guess_params,
453
+ 'original_low_loss': dset.title})
454
+ return z_loss_dset
455
+ else:
456
+ print('Error: need a spectrum or spectral image sidpy dataset')
457
+ print('Not dset.shape = ', dset.shape)
458
+ return None
267
459
 
268
- element = str(z)
269
- x_sections = get_x_sections()
270
- out_string = ''
271
- if verbose:
272
- print('Major edges')
273
- edge_list = {x_sections[element]['name']: {}}
460
+ # define guess function for SidFitter
461
+ def guess_function(xvec, yvec):
462
+ return guess_params
274
463
 
275
- for key in all_edges:
276
- if key in x_sections[element]:
277
- if 'onset' in x_sections[element][key]:
278
- if verbose:
279
- print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
280
- out_string = out_string + f" {x_sections[element]['name']}-{key}: " \
281
- f"{x_sections[element][key]['onset']:8.1f} eV /n"
282
- edge_list[x_sections[element]['name']][key] = x_sections[element][key]['onset']
283
- return out_string, edge_list
464
+ # apply to all spectra
465
+ zero_loss_fitter = SidFitter(fit_dset, zl_func, num_workers=n_workers, guess_fn=guess_function, threads=n_threads,
466
+ return_cov=False, return_fit=False, return_std=False, km_guess=False, num_fit_parms=6)
467
+
468
+ [z_loss_params] = zero_loss_fitter.do_fit()
469
+ z_loss_dset = dset.copy()
470
+ z_loss_dset *= 0.0
284
471
 
472
+ energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
473
+ z_loss_dset.shape[1], energy.shape[0]))
474
+ z_loss_peaks = zl_func(energy_grid, *z_loss_params)
475
+ z_loss_dset += z_loss_peaks
285
476
 
286
- def find_major_edges(edge_onset, maximal_chemical_shift=5.):
287
- """Find all major edges within an energy range
477
+ shifts = z_loss_params[:, :, 0] * z_loss_params[:, :, 3]
478
+ widths = z_loss_params[:, :, 2] * z_loss_params[:, :, 5]
288
479
 
289
- Parameters
290
- ----------
291
- edge_onset: float
292
- approximate energy of ionization edge
293
- maximal_chemical_shift: float
294
- optional, range of energy window around edge_onset to look for major edges
480
+ z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
481
+ 'endFitEnergy': endFitEnergy,
482
+ 'fit_parameter': z_loss_params,
483
+ 'original_low_loss': dset.title})
295
484
 
296
- Returns
297
- -------
298
- text: str
299
- string with all major edges in energy range
300
485
 
301
- """
302
- text = ''
303
- x_sections = get_x_sections()
304
- for element in x_sections:
305
- for key in x_sections[element]:
486
+ return z_loss_dset
306
487
 
307
- # if isinstance(x_sections[element][key], dict):
308
- if key in major_edges:
309
488
 
310
- if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
311
- # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
312
- text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
313
- f"{x_sections[element][key]['onset']:8.1f} eV "
489
+ def drude(energy_scale, peak_position, peak_width, gamma):
490
+ """dielectric function according to Drude theory"""
314
491
 
315
- return text
492
+ eps = (1 - (peak_position ** 2 - peak_width * energy_scale * 1j) /
493
+ (energy_scale ** 2 + 2 * energy_scale * gamma * 1j)) # Mod drude term
494
+ return eps
316
495
 
317
496
 
318
- def find_all_edges(edge_onset, maximal_chemical_shift=5):
319
- """Find all (major and minor) edges within an energy range
497
+ def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
498
+ """dielectric function according to Drude-Lorentz theory"""
320
499
 
321
- Parameters
322
- ----------
323
- edge_onset: float
324
- approximate energy of ionization edge
325
- maximal_chemical_shift: float
326
- optional, range of energy window around edge_onset to look for major edges
500
+ eps = eps_inf
501
+ for i in range(leng):
502
+ eps = eps + amplitude[i] * (1 / (e + ep[i] + gamma[i] * 1j) - 1 / (e - ep[i] + gamma[i] * 1j))
503
+ return eps
327
504
 
328
- Returns
329
- -------
330
- text: str
331
- string with all edges in energy range
332
505
 
506
+ def fit_plasmon(dataset, startFitEnergy, endFitEnergy, plot_result=False, number_workers=4, number_threads=8):
333
507
  """
508
+ Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
509
+
510
+ This function applies the Drude model to fit plasmon peaks in a dataset obtained
511
+ from transmission electron microscopy (TEM). It processes the dataset to determine
512
+ peak positions, widths, and amplitudes within a specified energy range. The function
513
+ can handle datasets with different dimensions and offers parallel processing capabilities.
514
+
515
+ Parameters:
516
+ dataset: sidpy.Dataset or numpy.ndarray
517
+ The dataset containing TEM spectral data.
518
+ startFitEnergy: float
519
+ The start energy of the fitting window.
520
+ endFitEnergy: float
521
+ The end energy of the fitting window.
522
+ plot_result: bool, optional
523
+ If True, plots the fitting results (default is False).
524
+ number_workers: int, optional
525
+ The number of workers for parallel processing (default is 4).
526
+ number_threads: int, optional
527
+ The number of threads for parallel processing (default is 8).
334
528
 
335
- text = ''
336
- x_sections = get_x_sections()
337
- for element in x_sections:
338
- for key in x_sections[element]:
339
-
340
- if isinstance(x_sections[element][key], dict):
341
- if 'onset' in x_sections[element][key]:
342
- if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
343
- # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
344
- text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
345
- f"{x_sections[element][key]['onset']:8.1f} eV "
346
- return text
347
-
529
+ Returns:
530
+ fitted_dataset: sidpy.Dataset or numpy.ndarray
531
+ The dataset with fitted plasmon peak parameters. The dimensions and
532
+ format depend on the input dataset.
348
533
 
349
- def second_derivative(dataset, sensitivity):
350
- """Calculates second derivative of a sidpy.dataset"""
534
+ Raises:
535
+ ValueError: If the input dataset does not have the expected dimensions or format.
351
536
 
352
- dim = dataset.get_spectrum_dims()
353
- energy_scale = np.array(dataset._axes[dim[0]])
354
- if dataset.data_type.name == 'SPECTRAL_IMAGE':
355
- spectrum = dataset.view.get_spectrum()
537
+ Notes:
538
+ - The function uses the Drude model to fit plasmon peaks.
539
+ - The fitting parameters are peak position (Ep), peak width (Ew), and amplitude (A).
540
+ - If `plot_result` is True, the function plots Ep, Ew, and A as separate subplots.
541
+ """
542
+ # define Drude function for plasmon fitting
543
+ def energy_loss_function(E, Ep, Ew, A):
544
+ E = E/E.max()
545
+ eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
546
+ elf = (-1/eps).imag
547
+ return A*elf
548
+
549
+ # define window for fitting
550
+ energy = dataset.get_spectral_dims(return_axis=True)[0].values
551
+ start_fit_pixel = np.searchsorted(energy, startFitEnergy)
552
+ end_fit_pixel = np.searchsorted(energy, endFitEnergy)
553
+
554
+ # rechunk dataset
555
+ if dataset.ndim == 3:
556
+ dataset = dataset.rechunk(chunks=(1, 1, -1))
557
+ fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
558
+ elif dataset.ndim == 2:
559
+ dataset = dataset.rechunk(chunks=(1, -1))
560
+ fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
356
561
  else:
357
- spectrum = np.array(dataset)
562
+ fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
563
+ guess_pos = np.argmax(fit_dset)
564
+ guess_amplitude = fit_dset[guess_pos]
565
+ guess_width = (endFitEnergy - startFitEnergy)/2
566
+ popt, pcov = curve_fit(energy_loss_function, energy, dataset,
567
+ p0=[guess_pos, guess_width, guess_amplitude])
568
+ return popt
569
+
570
+ # if it can be parallelized:
571
+ fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
572
+ threads=number_threads, return_cov=False, return_fit=False, return_std=False,
573
+ km_guess=False, num_fit_parms=3)
574
+ [fitted_dataset] = fitter.do_fit()
358
575
 
359
- spec = scipy.ndimage.gaussian_filter(spectrum, 3)
576
+ if plot_result:
577
+ fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)
578
+ ax1.imshow(fitted_dataset[:, :, 0], cmap='jet')
579
+ ax1.set_title('Ep - Peak Position')
580
+ ax2.imshow(fitted_dataset[:, :, 1], cmap='jet')
581
+ ax2.set_title('Ew - Peak Width')
582
+ ax3.imshow(fitted_dataset[:, :, 2], cmap='jet')
583
+ ax3.set_title('A - Amplitude')
584
+ plt.show()
585
+ return fitted_dataset
360
586
 
361
- dispersion = get_slope(energy_scale)
362
- second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
363
- second_dif[:3] = 0
364
- second_dif[-3:] = 0
365
587
 
366
- # find if there is a strong edge at high energy_scale
367
- noise_level = 2. * np.std(second_dif[3:50])
368
- [indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
369
- width = 50 / dispersion
370
- if width < 50:
371
- width = 50
372
- start_end_noise = int(len(energy_scale) - width)
373
- for index in indices[::-1]:
374
- if index > start_end_noise:
375
- start_end_noise = index - 70
376
588
 
377
- noise_level_start = sensitivity * np.std(second_dif[3:50])
378
- noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
379
- slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
380
- noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
381
- return second_dif, noise_level
589
+ def drude_simulation(dset, e, ep, ew, tnm, eb):
590
+ """probabilities of dielectric function eps relative to zero-loss integral (i0 = 1)
382
591
 
592
+ Gives probabilities of dielectric function eps relative to zero-loss integral (i0 = 1) per eV
593
+ Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
383
594
 
384
- def find_edges(dataset, sensitivity=2.5):
385
- """find edges within a sidpy.Dataset"""
595
+ # function drude(ep,ew,eb,epc,e0,beta,nn,tnm)
596
+ # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy(eb),
597
+ # this program generates:
598
+ # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
599
+ # single scattering from Eq. (4.26) and SRFINT from Eq. (4.31)
600
+ # The output is e, ssd into the file drude.ssd (for use in Flog etc.)
601
+ # and e,eps1 ,eps2 into drude.eps (for use in Kroeger etc.)
602
+ # Gives probabilities relative to zero-loss integral (i0 = 1) per eV
603
+ # Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
604
+ # Version 10.11.26
386
605
 
387
- dim = dataset.get_spectrum_dims()
388
- energy_scale = np.array(dataset._axes[dim[0]])
389
606
 
390
- second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
607
+ b.7 drude Simulation of a Low-Loss Spectrum
608
+ The program DRUDE calculates a single-scattering plasmon-loss spectrum for
609
+ a specimen of a given thickness tnm (in nm), recorded with electrons of a
610
+ specified incident energy e0 by a spectrometer that accepts scattering up to a
611
+ specified collection semi-angle beta. It is based on the extended drude model
612
+ (Section 3.3.2), with a volume energy-loss function elf in accord with Eq. (3.64) and
613
+ a surface-scattering energy-loss function srelf as in Eq. (4.31). Retardation effects
614
+ and coupling between the two surface modes are not included. The surface term can
615
+ be made negligible by entering a large specimen thickness (tnm > 1000).
616
+ Surface intensity srfint and volume intensity volint are calculated from
617
+ Eqs. (4.31) and (4.26), respectively. The total spectral intensity ssd is written to
618
+ the file DRUDE.SSD, which can be used as input for KRAKRO. These intensities are
619
+ all divided by i0, to give relative probabilities (per eV). The real and imaginary parts
620
+ of the dielectric function are written to DRUDE.EPS and can be used for comparison
621
+ with the results of Kramers–Kronig analysis (KRAKRO.DAT).
622
+ Written output includes the surface-loss probability Ps, obtained by integrating
623
+ srfint (a value that relates to two surfaces but includes the negative begrenzungs
624
+ term), for comparison with the analytical integration represented by Eq. (3.77). The
625
+ volume-loss probability p_v is obtained by integrating volint and is used to calculate
626
+ the volume plasmon mean free path (lam = tnm/p_v). The latter is listed and
627
+ compared with the MFP obtained from Eq. (3.44), which represents analytical integration
628
+ assuming a zero-width plasmon peak. The total probability (Pt = p_v+Ps) is
629
+ calculated and used to evaluate the thickness (lam.Pt) that would be given by the formula
630
+ t/λ = ln(It/i0), ignoring the surface-loss probability. Note that p_v will exceed
631
+ 1 for thicker specimens (t/λ > 1), since it represents the probability of plasmon
632
+ scattering relative to that of no inelastic scattering.
633
+ The command-line usage is drude(ep,ew,eb,epc,beta,e0,tnm,nn), where ep is the
634
+ plasmon energy, ew the plasmon width, eb the binding energy of the electrons (0 for
635
+ a metal), and nn is the number of channels in the output spectrum. An example of
636
+ the output is shown in Fig. b.1a,b.
391
637
 
392
- [indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
638
+ """
639
+
640
+ epc = dset.energy_scale[1] - dset.energy_scale[0] # input('ev per channel : ');
641
+
642
+ b = dset.metadata['collection_angle'] / 1000. # rad
643
+ epc = dset.energy_scale[1] - dset.energy_scale[0] # input('ev per channel : ');
644
+ e0 = dset.metadata['acceleration_voltage'] / 1000. # input('incident energy e0(kev) : ');
393
645
 
394
- peaks['peak_positions'] = energy_scale[indices]
395
- peaks['peak_indices'] = indices
396
- edge_energies = [energy_scale[50]]
397
- edge_indices = []
646
+ # effective kinetic energy: T = m_o v^2/2,
647
+ t = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2 # eV # equ.5.2a or Appendix E p 427
648
+
649
+ # 2 gamma T
650
+ tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0) # eV Appendix E p 427
651
+
652
+ rk0 = 2590 * (1.0 + e0 / 511.06) * np.sqrt(2.0 * t / 511060)
653
+
654
+ os = e[0]
655
+ ew_mod = eb
656
+ tags = dset.metadata
657
+
658
+ eps = 1 - (ep ** 2 - ew_mod * e * 1j) / (e ** 2 + 2 * e * ew * 1j) # Mod drude term
659
+
660
+ eps[np.nonzero(eps == 0.0)] = 1e-19
661
+ elf = np.imag(-1 / eps)
398
662
 
399
- [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
400
- minima = energy_scale[indices]
663
+ the = e / tgt # varies with energy loss! # Appendix E p 427
664
+ # srfelf = 4..*eps2./((1+eps1).^2+eps2.^2) - elf; %equivalent
665
+ srfelf = np.imag(-4. / (1.0 + eps)) - elf # for 2 surfaces
666
+ angdep = np.arctan(b / the) / the - b / (b * b + the * the)
667
+ srfint = angdep * srfelf / (3.1416 * 0.05292 * rk0 * t) # probability per eV
668
+ anglog = np.log(1.0 + b * b / the / the)
669
+ i0 = dset.sum() # *tags['counts2e']
401
670
 
402
- for peak_number in range(len(peaks['peak_positions'])):
403
- position = peaks['peak_positions'][peak_number]
404
- if position - edge_energies[-1] > 20:
405
- impossible = minima[minima < position]
406
- impossible = impossible[impossible > position - 5]
407
- if len(impossible) == 0:
408
- possible = minima[minima > position]
409
- possible = possible[possible < position + 5]
410
- if len(possible) > 0:
411
- edge_energies.append((position + possible[0])/2)
412
- edge_indices.append(np.searchsorted(energy_scale, (position + possible[0])/2))
671
+ # 2 * t = m_0 v**2 !!! a_0 = 0.05292 nm
672
+ volint = abs(tnm / (np.pi * 0.05292 * t * 2.0) * elf * anglog) # S equ 4.26% probability per eV
673
+ volint = volint * i0 / epc # S probability per channel
674
+ ssd = volint # + srfint;
413
675
 
414
- selected_edges = []
415
- for peak in edge_indices:
416
- if 525 < energy_scale[peak] < 533:
417
- selected_edges.append('O-K1')
418
- else:
419
- selected_edge = ''
420
- edges = find_major_edges(energy_scale[peak], 20)
421
- edges = edges.split('\n')
422
- minimum_dist = 100.
423
- for edge in edges[1:]:
424
- edge = edge[:-3].split(':')
425
- name = edge[0].strip()
426
- energy = float(edge[1].strip())
427
- if np.abs(energy - energy_scale[peak]) < minimum_dist:
428
- minimum_dist = np.abs(energy - energy_scale[peak])
429
- selected_edge = name
676
+ if e[0] < -1.0:
677
+ xs = int(abs(-e[0] / epc))
430
678
 
431
- if selected_edge != '':
432
- selected_edges.append(selected_edge)
679
+ ssd[0:xs] = 0.0
680
+ volint[0:xs] = 0.0
681
+ srfint[0:xs] = 0.0
433
682
 
434
- return selected_edges
683
+ # if os <0:
684
+ p_s = np.trapz(e, srfint) # 2 surfaces but includes negative Begrenzung contribution.
685
+ p_v = abs(np.trapz(e, abs(volint / tags['spec'].sum()))) # integrated volume probability
686
+ p_v = (volint / i0).sum() # our data have he same epc and the trapez formula does not include
687
+ lam = tnm / p_v # does NOT depend on free-electron approximation (no damping).
688
+ lamfe = 4.0 * 0.05292 * t / ep / np.log(1 + (b * tgt / ep) ** 2) # Eq.(3.44) approximation
435
689
 
690
+ tags['eps'] = eps
691
+ tags['lam'] = lam
692
+ tags['lamfe'] = lamfe
693
+ tags['p_v'] = p_v
436
694
 
437
- def assign_likely_edges(edge_channels, energy_scale):
438
- edges_in_list = []
439
- result = {}
440
- for channel in edge_channels:
441
- if channel not in edge_channels[edges_in_list]:
442
- shift = 5
443
- element_list = find_major_edges(energy_scale[channel], maximal_chemical_shift=shift)
444
- while len(element_list) < 1:
445
- shift+=1
446
- element_list = find_major_edges(energy_scale[channel], maximal_chemical_shift=shift)
695
+ return ssd # /np.pi
 
-             if len(element_list) > 1:
-                 while len(element_list) > 0:
-                     shift -= 1
-                     element_list = find_major_edges(energy_scale[channel], maximal_chemical_shift=shift)
-                 element_list = find_major_edges(energy_scale[channel], maximal_chemical_shift=shift+1)
-             element = (element_list[:4]).strip()
-             z = get_z(element)
-             result[element] = []
-             _, edge_list = list_all_edges(z)
 
-             for peak in edge_list:
-                 for edge in edge_list[peak]:
-                     possible_minor_edge = np.argmin(np.abs(energy_scale[edge_channels]-edge_list[peak][edge]))
-                     if np.abs(energy_scale[edge_channels[possible_minor_edge]]-edge_list[peak][edge]) < 3:
-                         # print('nex', next_e)
-                         edges_in_list.append(possible_minor_edge)
-
-                         result[element].append(edge)
-
-     return result
+ def kroeger_core(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True):
+     """This function calculates the differential scattering probability
 
+     .. math::
+         \\frac{d^2P}{d \\Omega d_e}
+     of the low-loss region for total loss and volume plasmon loss
 
- def auto_id_edges(dataset):
-     edge_channels = identify_edges(dataset)
-     dim = dataset.get_spectrum_dims()
-     energy_scale = np.array(dataset._axes[dim[0]])
-     found_edges = assign_likely_edges(edge_channels, energy_scale)
-     return found_edges
+     Args:
+         e_data (array): energy scale [eV]
+         a_data (array): angle or momentum range [rad]
+         eps_data (array): dielectric function
+         acceleration_voltage_kev (float): acceleration voltage [keV]
+         thickness (float): thickness in nm
+         relativistic (boolean): relativistic correction
 
+     Returns:
+         P (numpy array 2d): total loss probability
+         p_vol (numpy array 2d): volume loss probability
 
- def identify_edges(dataset, noise_level=2.0):
+         return P, P*scale*1e2,p_vol*1e2, p_simple*1e2
      """
-     Using first derivative to determine edge onsets
-     Any peak in first derivative higher than noise_level times standard deviation will be considered
-
-     Parameters
-     ----------
-     dataset: sidpy.Dataset
-         the spectrum
-     noise_level: float
-         this number times standard deviation in first derivative decides on whether an edge onset is significant
-
-     Return
-     ------
-     edge_channel: numpy.ndarray
-
-     """
-     dim = dataset.get_spectrum_dims()
-     energy_scale = np.array(dataset._axes[dim[0]])
-     dispersion = get_slope(energy_scale)
-     spec = scipy.ndimage.gaussian_filter(dataset, 3/dispersion)  # smooth with 3eV wide Gaussian
 
-     first_derivative = spec - np.roll(spec, +2)
-     first_derivative[:3] = 0
-     first_derivative[-3:] = 0
+     # $d^2P/(dEd\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im \left[ \frac{t\mu^2}{\varepsilon \phi^2 } \right]
+     """
+     # Internally everything is calculated in si units
+     # acceleration_voltage_kev = 200 #keV
+     # thick = 32.0*10-9 # m
 
-     # find if there is a strong edge at high energy_scale
-     noise_level = noise_level*np.std(first_derivative[3:50])
-     [edge_channels, _] = scipy.signal.find_peaks(first_derivative, noise_level)
-
-     return edge_channels
      """
+     a_data = np.array(a_data)
+     e_data = np.array(e_data)
+     # adjust input to si units
+     wavelength = get_wave_length(acceleration_voltage_kev * 1e3)  # in m
+     thickness = thickness * 1e-9  # input thickness now in m
 
+     # Define constants
+     # ec = 14.4;
+     m_0 = constants.value(u'electron mass')  # REST electron mass in kg
+     # h = constants.Planck  # Planck's constant
+     hbar = constants.hbar
 
- def add_element_to_dataset(dataset, z):
-     """
-     """
-     # We check whether this element is already in the
-     energy_scale = dataset.energy_loss
-     zz = get_z(z)
-     if 'edges' not in dataset.metadata:
-         dataset.metadata['edges'] = {'model': {}, 'use_low_loss': False}
-     index = 0
-     for key, edge in dataset.metadata['edges'].items():
-         if key.isdigit():
-             index += 1
-             if 'z' in edge:
-                 if zz == edge['z']:
-                     index = int(key)
-                     break
+     c = constants.speed_of_light  # speed of light m/s
+     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
+     e = constants.value(u'elementary charge')  # electron charge in Coulomb
+     # print('hbar =', hbar ,' [Js] =', hbar/e ,'[ eV s]')
 
-     major_edge = ''
-     minor_edge = ''
-     all_edges = {}
-     x_section = get_x_sections(zz)
-     edge_start = 10  # int(15./ft.get_slope(self.energy_scale)+0.5)
-     for key in x_section:
-         if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():
-             if energy_scale[edge_start] < x_section[key]['onset'] < energy_scale[-edge_start]:
-                 if key in ['K1', 'L3', 'M5', 'M3']:
-                     major_edge = key
-
-                 all_edges[key] = {'onset': x_section[key]['onset']}
+     # Calculate fixed terms of equation
+     va = 1 - (511. / (511. + acceleration_voltage_kev)) ** 2  # acceleration_voltage_kev is incident energy in keV
+     v = c * np.sqrt(va)
 
-     if major_edge != '':
-         key = major_edge
-     elif minor_edge != '':
-         key = minor_edge
+     if relativistic:
+         beta = v / c  # non-relativistic for =1
+         gamma = 1. / np.sqrt(1 - beta ** 2)
      else:
-         print(f'Could not find an edge of {zz} in spectrum')
-         return False
+         beta = 1
+         gamma = 1  # set = 1 to correspond to E+B & Siegle
 
-
-     if str(index) not in dataset.metadata['edges']:
-         dataset.metadata['edges'][str(index)] = {}
+     momentum = m_0 * v * gamma  # used for xya, E&B have no gamma
 
-     start_exclude = x_section[key]['onset'] - x_section[key]['excl before']
-     end_exclude = x_section[key]['onset'] + x_section[key]['excl after']
+     # ##### Define mapped variables
 
-     dataset.metadata['edges'][str(index)] = {'z': zz, 'symmetry': key, 'element': elements[zz],
-                                              'onset': x_section[key]['onset'], 'end_exclude': end_exclude,
-                                              'start_exclude': start_exclude}
-     dataset.metadata['edges'][str(index)]['all_edges'] = all_edges
-     dataset.metadata['edges'][str(index)]['chemical_shift'] = 0.0
-     dataset.metadata['edges'][str(index)]['areal_density'] = 0.0
-     dataset.metadata['edges'][str(index)]['original_onset'] = dataset.metadata['edges'][str(index)]['onset']
-     return True
+     # Define independent variables E, theta
+     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
+     # Define CONJUGATE dielectric function variable eps
+     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
 
+     # ##### Calculate lambda in equation EB 2.3
+     theta2 = theta ** 2 + 1e-15
 
- def make_edges(edges_present, energy_scale, e_0, coll_angle, low_loss=None):
-     """Makes the edges dictionary for quantification
+     theta_e = energy * e / momentum / v  # critical angle
 
-     Parameters
-     ----------
-     edges_present: list
-         list of edges
-     energy_scale: numpy array
-         energy scale on which to make cross-section
-     e_0: float
-         acceleration voltage (in V)
-     coll_angle: float
-         collection angle in mrad
-     low_loss: numpy array with same length as energy_scale
-         low-loss spectrum with which to convolve the cross-section (default=None)
+     lambda2 = theta2 - eps * theta_e ** 2 * beta ** 2  # Eq 2.3
 
-     Returns
-     -------
-     edges: dict
-         dictionary with all information on cross-section
-     """
-     x_sections = get_x_sections()
-     edges = {}
-     for i, edge in enumerate(edges_present):
-         element, symmetry = edge.split('-')
-         z = 0
-         for key in x_sections:
-             if element == x_sections[key]['name']:
-                 z = int(key)
-         edges[i] = {}
-         edges[i]['z'] = z
-         edges[i]['symmetry'] = symmetry
-         edges[i]['element'] = element
+     lambd = np.sqrt(lambda2)
+     if (np.real(lambd) < 0).any():
+         print(' error negative lambda')
 
-     for key in edges:
-         xsec = x_sections[str(edges[key]['z'])]
-         if 'chemical_shift' not in edges[key]:
-             edges[key]['chemical_shift'] = 0
-         if 'symmetry' not in edges[key]:
-             edges[key]['symmetry'] = 'K1'
-         if 'K' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'K1'
-         elif 'L' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'L3'
-         elif 'M' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'M5'
-         else:
-             edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
+     # ##### Calculate lambda0 in equation EB 2.4
+     # According to Kröger real(lambda0) is defined as positive!
 
-         edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
-         edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-         edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
-         edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
+     phi2 = lambda2 + theta_e ** 2  # Eq. 2.2
+     lambda02 = theta2 - theta_e ** 2 * beta ** 2  # eta=1 Eq 2.4
+     lambda02[lambda02 < 0] = 0
+     lambda0 = np.sqrt(lambda02)
+     if not (np.real(lambda0) >= 0).any():
+         print(' error negative lambda0')
 
-     edges = make_cross_sections(edges, energy_scale, e_0, coll_angle, low_loss)
+     de = thickness * energy * e / (2.0 * hbar * v)  # Eq 2.5
+     xya = lambd * de / theta_e  # used in Eqs 2.6, 2.7, 4.4
 
-     return edges
+     lplus = lambda0 * eps + lambd * np.tanh(xya)  # eta=1 %Eq 2.6
+     lminus = lambda0 * eps + lambd / np.tanh(xya)  # eta=1 %Eq 2.7
 
- def fit_dataset(dataset):
-     energy_scale = dataset.energy_loss
-     if 'fit_area' not in dataset.metadata['edges']:
-         dataset.metadata['edges']['fit_area'] = {}
-     if 'fit_start' not in dataset.metadata['edges']['fit_area']:
-         dataset.metadata['edges']['fit_area']['fit_start'] = energy_scale[50]
-     if 'fit_end' not in dataset.metadata['edges']['fit_area']:
-         dataset.metadata['edges']['fit_area']['fit_end'] = energy_scale[-2]
-     dataset.metadata['edges']['use_low_loss'] = False
-
-     if 'experiment' in dataset.metadata:
-         exp = dataset.metadata['experiment']
-         if 'convergence_angle' not in exp:
-             raise ValueError('need a convergence_angle in experiment of metadata dictionary ')
-         alpha = exp['convergence_angle']
-         beta = exp['collection_angle']
-         beam_kv = exp['acceleration_voltage']
-         energy_scale = dataset.energy_loss
-         eff_beta = effective_collection_angle(energy_scale, alpha, beta, beam_kv)
-         edges = make_cross_sections(dataset.metadata['edges'], np.array(energy_scale), beam_kv, eff_beta)
-         dataset.metadata['edges'] = fit_edges2(dataset, energy_scale, edges)
-         areal_density = []
-         elements = []
-         for key in edges:
-             if key.isdigit():  # only edges have numbers in that dictionary
-                 elements.append(edges[key]['element'])
-                 areal_density.append(edges[key]['areal_density'])
-         areal_density = np.array(areal_density)
-         out_string = '\nRelative composition: \n'
-         for i, element in enumerate(elements):
-             out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '
+     mue2 = 1 - (eps * beta ** 2)  # Eq. 4.5
+     phi20 = lambda02 + theta_e ** 2  # Eq 4.6
+     phi201 = theta2 + theta_e ** 2 * (1 - (eps + 1) * beta ** 2)  # eta=1, eps-1 in E+b Eq.(4.7)
 
-         print(out_string)
+     # Eq 4.2
+     a1 = phi201 ** 2 / eps
+     a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
+     a = a1 * a2
 
+     # Eq 4.3
+     b1 = beta ** 2 * lambda0 * theta_e * phi201
+     b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
+     b = b1 * b2
 
- def auto_chemical_composition(dataset):
+     # Eq 4.4
+     c1 = -beta ** 4 * lambda0 * lambd * theta_e ** 2
+     c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
+     c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
+     c = c1 * (c2 + c3)
 
-     found_edges = auto_id_edges(dataset)
-     for key in found_edges:
-         add_element_to_dataset(dataset, key)
-     fit_dataset(dataset)
+     # Put all the pieces together...
+     p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
 
+     p_v = thickness * mue2 / eps / phi2
 
- def make_cross_sections(edges, energy_scale, e_0, coll_angle, low_loss=None):
-     """Updates the edges dictionary with collection angle-integrated X-ray photo-absorption cross-sections
+     p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2  # ASSUMES eta=1
+     p_s2 = hbar / momentum
+     p_s3 = a + b + c
 
-     """
-     for key in edges:
-         if str(key).isdigit():
-             edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
-                                            edges[key]['chemical_shift']) / 1e10  # from barns to 1/nm^2
-             if low_loss is not None:
-                 low_loss = np.roll(np.array(low_loss), 1024 - np.argmax(np.array(low_loss)))
-                 edges[key]['data'] = scipy.signal.convolve(edges[key]['data'], low_loss/low_loss.sum(), mode='same')
+     p_s = p_s1 * p_s2 * p_s3
 
-             edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-             edges[key]['X_section_type'] = 'XRPA'
-             edges[key]['X_section_source'] = 'pyTEMlib'
+     # print(p_v.min(),p_v.max(),p_s.min(),p_s.max())
+     # Calculate P and p_vol (volume only)
+     dtheta = a_data[1] - a_data[0]
+     scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
 
-     return edges
+     p = p_coef * np.imag(p_v - p_s)  # Eq 4.1
+     p_vol = p_coef * np.imag(p_v) * scale
 
+     # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
+     # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
 
- def power_law(energy, a, r):
-     """power law for power_law_background"""
-     return a * np.power(energy, -r)
+     p_simple = p_coef * np.imag(1 / eps) * thickness / (theta2 + theta_e ** 2) * scale
+     # Watch it: eps is conjugated dielectric function
 
+     return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2  # ,lplus_min,lminus_min
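
kroeger_core works on an energy-by-angle mesh. A hedged sketch using a model dielectric function built from drude above (all values illustrative):

    import numpy as np
    import pyTEMlib.eels_tools as eels

    e_data = np.linspace(0.5, 40., 400)               # eV
    a_data = np.linspace(0., 10e-3, 100)              # rad
    eps_data = eels.drude(e_data, 15.0, 1.0, 0.2)     # model dielectric function
    p, p_scaled, p_vol, p_simple = eels.kroeger_core(e_data, a_data, eps_data,
                                                     acceleration_voltage_kev=200.,
                                                     thickness=50.)
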
 
- def power_law_background(spectrum, energy_scale, fit_area, verbose=False):
-     """fit of power law to spectrum """
 
-     # Determine energy window for background fit in pixels
-     startx = np.searchsorted(energy_scale, fit_area[0])
-     endx = np.searchsorted(energy_scale, fit_area[1])
-
-     x = np.array(energy_scale)[startx:endx]
+ #################################################################
+ # CORE - LOSS functions
+ #################################################################
 
-     y = np.array(spectrum)[startx:endx].flatten()
+ def get_z(z: Union[int, str]) -> int:
+     """Returns the atomic number independent of input as a string or number
 
-     # Initial values of parameters
-     p0 = np.array([1.0E+20, 3])
+     Parameter
+     ---------
+     z: int, str
+         atomic number of chemical symbol (0 if not valid)
+     Return:
+     ------
+     z_out: int
+         atomic number
+     """
+     x_sections = get_x_sections()
 
-     # background fitting
-     def bgdfit(pp, yy, xx):
-         err = yy - power_law(xx, pp[0], pp[1])
-         return err
+     z_out = 0
+     if str(z).isdigit():
+         z_out = int(z)
+     elif isinstance(z, str):
+         for key in x_sections:
+             if x_sections[key]['name'].lower() == z.lower():  # Well one really should know how to write elemental
+                 z_out = int(key)
+     else:
+         raise TypeError('A string or number is required')
+     return z_out
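
get_z accepts either spelling of an element; a quick sketch:

    import pyTEMlib.eels_tools as eels

    eels.get_z(22)      # -> 22
    eels.get_z('Ti')    # -> 22, element names are matched case-insensitively
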
709
867
 
710
- [p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
711
868
 
712
- background_difference = y - power_law(x, p[0], p[1])
713
- background_noise_level = std_dev = np.std(background_difference)
714
- if verbose:
715
- print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
716
- print(background_difference.max() / background_noise_level)
869
+ def get_x_sections(z: int=0) -> dict:
870
+ """Reads X-ray fluorescent cross-sections from a dictionary.
717
871
 
718
- print(f'Noise level in spectrum {std_dev:.3f} counts')
872
+ Parameters
873
+ ----------
874
+ z: int
875
+ atomic number if zero all cross-sections will be returned
719
876
 
720
- # Calculate background over the whole energy scale
721
- background = power_law(energy_scale, p[0], p[1])
722
- return background, p
877
+ Returns
878
+ -------
879
+ dictionary
880
+ cross-section of an element or of all elements if z = 0
723
881
 
882
+ """
883
+ if z < 1:
884
+ return x_sections
885
+ else:
886
+ z = str(z)
887
+ if z in x_sections:
888
+ return x_sections[z]
889
+ else:
890
+ return 0
724
891
 
725
- def cl_model(x, p, number_of_edges, xsec):
726
- """ core loss model for fitting"""
727
- y = (p[9] * np.power(x, (-p[10]))) + p[7] * x + p[8] * x * x
728
- for i in range(number_of_edges):
729
- y = y + p[i] * xsec[i, :]
730
- return y
731
892
 
893
+ def list_all_edges(z: Union[str, int]=0, verbose=False)->[str, dict]:
894
+ """List all ionization edges of an element with atomic number z
732
895
 
733
- def fit_edges2(spectrum, energy_scale, edges):
734
- """fit edges for quantification"""
896
+ Parameters
897
+ ----------
898
+ z: int
899
+ atomic number
900
+ verbose: bool, optional
901
+ more info if set to True
735
902
 
736
- dispersion = energy_scale[1] - energy_scale[0]
737
- # Determine fitting ranges and masks to exclude ranges
738
- mask = np.ones(len(spectrum))
903
+ Returns
904
+ -------
905
+ out_string: str
906
+ string with all major edges in energy range
907
+ """
739
908
 
740
- background_fit_start = edges['fit_area']['fit_start']
741
- if edges['fit_area']['fit_end'] > energy_scale[-1]:
742
- edges['fit_area']['fit_end'] = energy_scale[-1]
743
- background_fit_end = edges['fit_area']['fit_end']
909
+ element = str(get_z(z))
910
+ x_sections = get_x_sections()
911
+ out_string = ''
912
+ if verbose:
913
+ print('Major edges')
914
+ edge_list = {x_sections[element]['name']: {}}
915
+
916
+ for key in all_edges:
917
+ if key in x_sections[element]:
918
+ if 'onset' in x_sections[element][key]:
919
+ if verbose:
920
+ print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
921
+ out_string = out_string + f" {x_sections[element]['name']}-{key}: " \
922
+ f"{x_sections[element][key]['onset']:8.1f} eV /n"
923
+ edge_list[x_sections[element]['name']][key] = x_sections[element][key]['onset']
924
+ return out_string, edge_list
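+ # Illustrative usage (variable names are placeholders): the string is meant
+ # for display, the dictionary for programmatic use.
+ # >>> text, edge_list = list_all_edges('Cu')
+ # >>> edge_list # e.g. {'Cu': {'L3': ..., 'L2': ..., ...}} with onsets in eV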
744
925
 
745
- startx = np.searchsorted(energy_scale, background_fit_start)
746
- endx = np.searchsorted(energy_scale, background_fit_end)
747
- mask[0:startx] = 0.0
748
- mask[endx:-1] = 0.0
749
- for key in edges:
750
- if key.isdigit():
751
- if edges[key]['start_exclude'] > background_fit_start + dispersion:
752
- if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
753
- if edges[key]['end_exclude'] > background_fit_end - dispersion:
754
- # we need at least one channel to fit.
755
- edges[key]['end_exclude'] = background_fit_end - dispersion
756
- startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
757
- if startx < 2:
758
- startx = 1
759
- endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
760
- mask[startx: endx] = 0.0
761
926
 
762
- ########################
763
- # Background Fit
764
- ########################
765
- bgd_fit_area = [background_fit_start, background_fit_end]
766
- background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
927
+ def find_all_edges(edge_onset: float, maximal_chemical_shift: float = 5.0, major_edges_only: bool = False) -> str:
928
+ """Find all (major and minor) edges within an energy range
767
929
 
768
- #######################
769
- # Edge Fit
770
- #######################
771
- x = energy_scale
772
- blurred = gaussian_filter(spectrum, sigma=5)
930
+ Parameters
931
+ ----------
932
+ edge_onset: float
933
+ approximate energy of ionization edge
934
+ maximal_chemical_shift: float, default = 5 eV
935
+ range of energy window around edge_onset to look for major edges
936
+ major_edges_only: boolean, default = False
937
+ only major edges are considered if True
938
+ Returns
939
+ -------
940
+ text: str
941
+ string with all edges in energy range
773
942
 
774
- y = blurred # now in probability
775
- y[np.where(y < 1e-8)] = 1e-8
943
+ """
776
944
 
777
- xsec = []
778
- number_of_edges = 0
779
- for key in edges:
780
- if key.isdigit():
781
- xsec.append(edges[key]['data'])
782
- number_of_edges += 1
783
- xsec = np.array(xsec)
945
+ text = ''
946
+ x_sections = get_x_sections()
947
+ for element in x_sections:
948
+ for key in x_sections[element]:
949
+ if isinstance(x_sections[element][key], dict):
950
+ if 'onset' in x_sections[element][key]:
951
+ if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
952
+ # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
953
+ new_text = f"\n {x_sections[element]['name']:2s}-{key}: " \
954
+ f"{x_sections[element][key]['onset']:8.1f} eV "
955
+ if major_edges_only:
956
+ if key in major_edges:
957
+ text += new_text
958
+ else:
959
+ text += new_text
784
960
 
785
- def model(xx, pp):
786
- yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
787
- for i in range(number_of_edges):
788
- pp[i] = np.abs(pp[i])
789
- yy = yy + pp[i] * xsec[i, :]
790
- return yy
961
+ return text
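+ # Illustrative usage: list the major edges within 10 eV of 532 eV (the oxygen
+ # K-edge region); the result is one newline-separated entry per matching edge.
+ # >>> print(find_all_edges(532., maximal_chemical_shift=10., major_edges_only=True))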
791
962
 
792
- def residuals(pp, xx, yy):
793
- err = np.abs((yy - model(xx, pp)) * mask) # / np.sqrt(np.abs(y))
794
- return err
795
963
 
796
- scale = y[100]
797
- pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
798
- [p, _] = leastsq(residuals, pin, args=(x, y))
964
+ def find_associated_edges(dataset: sidpy.Dataset) -> None:
+ """Associates each fitted peak in the metadata with the closest ionization edge"""
965
+ onsets = []
966
+ edges = []
967
+ if 'edges' in dataset.metadata:
968
+ for key, edge in dataset.metadata['edges'].items():
969
+ if key.isdigit():
970
+ element = edge['element']
971
+ pre_edge = 0. # edge['onset']-edge['start_exclude']
972
+ post_edge = edge['end_exclude'] - edge['onset']
973
+
974
+ for sym in edge['all_edges']: # TODO: Could be replaced with exclude
975
+ onsets.append(edge['all_edges'][sym]['onset'] + edge['chemical_shift']-pre_edge)
976
+ edges.append([key, f"{element}-{sym}", onsets[-1]])
977
+ for key, peak in dataset.metadata['peak_fit']['peaks'].items():
978
+ if key.isdigit():
979
+ distance = dataset.get_spectral_dims(return_axis=True)[0].values[-1]
980
+ index = -1
981
+ for ii, onset in enumerate(onsets):
982
+ if onset < peak['position'] < onset+post_edge:
983
+ if distance > np.abs(peak['position'] - onset):
984
+ distance = np.abs(peak['position'] - onset) # TODO: check whether absolute is good
985
+ distance_onset = peak['position'] - onset
986
+ index = ii
987
+ if index >= 0:
988
+ peak['associated_edge'] = edges[index][1] # check if more info is necessary
989
+ peak['distance_to_onset'] = distance_onset
990
+
991
+
992
+ def find_white_lines(dataset: sidpy.Dataset) -> None:
+ """Sums white-line (L3, L2, M5, M4) peak areas and stores ratios and sums in the metadata"""
993
+ if 'edges' in dataset.metadata:
994
+ white_lines = {}
995
+ for index, peak in dataset.metadata['peak_fit']['peaks'].items():
996
+ if index.isdigit():
997
+ if 'associated_edge' in peak:
998
+ if peak['associated_edge'][-2:] in ['L3', 'L2', 'M5', 'M4']:
999
+ if peak['distance_to_onset'] < 10:
1000
+ area = np.sqrt(2 * np.pi) * peak['amplitude'] * np.abs(peak['width']/np.sqrt(2 * np.log(2)))
1001
+ if peak['associated_edge'] not in white_lines:
1002
+ white_lines[peak['associated_edge']] = 0.
1003
+ if area > 0:
1004
+ white_lines[peak['associated_edge']] += area # TODO: only positive ones?
1005
+ white_line_ratios = {}
1006
+ white_line_sum = {}
1007
+ for sym, area in white_lines.items():
1008
+ if sym[-2:] in ['L2', 'M4', 'M2']:
1009
+ if area > 0 and f"{sym[:-1]}{int(sym[-1]) + 1}" in white_lines:
1010
+ if white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"] > 0:
1011
+ white_line_ratios[f"{sym}/{sym[-2]}{int(sym[-1]) + 1}"] = area / white_lines[
1012
+ f"{sym[:-1]}{int(sym[-1]) + 1}"]
1013
+ white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] = (
1014
+ area + white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"])
1015
+
1016
+ areal_density = 1.
1017
+ if 'edges' in dataset.metadata:
1018
+ for key, edge in dataset.metadata['edges'].items():
1019
+ if key.isdigit():
1020
+ if edge['element'] == sym.split('-')[0]:
1021
+ areal_density = edge['areal_density']
1022
+ break
1023
+ white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] /= areal_density
1024
+
1025
+ dataset.metadata['peak_fit']['white_lines'] = white_lines
1026
+ dataset.metadata['peak_fit']['white_line_ratios'] = white_line_ratios
1027
+ dataset.metadata['peak_fit']['white_line_sums'] = white_line_sum
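+ # Note on the peak area used above: for a Gaussian, area = amplitude * sigma * sqrt(2*pi).
+ # Dividing peak['width'] by sqrt(2*ln(2)) recovers sigma if 'width' stores the
+ # half-width at half-maximum (HWHM = sqrt(2*ln(2)) * sigma); if it stored the full
+ # FWHM, the divisor would be 2*sqrt(2*ln(2)) ~ 2.3548. This reading is an assumption.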
1028
+
799
1029
 
800
- for key in edges:
801
- if key.isdigit():
802
- edges[key]['areal_density'] = p[int(key)]
1030
+ def second_derivative(dataset: sidpy.Dataset, sensitivity: float = 2.5) -> typing.Tuple[np.ndarray, np.ndarray]:
1031
+ """Calculates second derivative of a sidpy.dataset"""
803
1032
 
804
- edges['model'] = {}
805
- edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
806
- edges['model']['background-poly_0'] = p[6]
807
- edges['model']['background-poly_1'] = p[7]
808
- edges['model']['background-poly_2'] = p[8]
809
- edges['model']['background-A'] = A
810
- edges['model']['background-r'] = r
811
- edges['model']['spectrum'] = model(x, p)
812
- edges['model']['blurred'] = blurred
813
- edges['model']['mask'] = mask
814
- edges['model']['fit_parameter'] = p
815
- edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
816
- edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
1033
+ dim = dataset.get_spectral_dims()
1034
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1035
+ if dataset.data_type.name == 'SPECTRAL_IMAGE':
1036
+ spectrum = dataset.view.get_spectrum()
1037
+ else:
1038
+ spectrum = np.array(dataset)
817
1039
 
818
- return edges
1040
+ spec = scipy.ndimage.gaussian_filter(spectrum, 3)
819
1041
 
1042
+ dispersion = get_slope(energy_scale)
1043
+ second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
1044
+ second_dif[:3] = 0
1045
+ second_dif[-3:] = 0
820
1046
 
821
- def fit_edges(spectrum, energy_scale, region_tags, edges):
822
- """fit edges for quantification"""
1047
+ # find if there is a strong edge at high energy_scale
1048
+ noise_level = 2. * np.std(second_dif[3:50])
1049
+ [indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
1050
+ width = 50 / dispersion
1051
+ if width < 50:
1052
+ width = 50
1053
+ start_end_noise = int(len(energy_scale) - width)
1054
+ for index in indices[::-1]:
1055
+ if index > start_end_noise:
1056
+ start_end_noise = index - 70
823
1057
 
824
- # Determine fitting ranges and masks to exclude ranges
825
- mask = np.ones(len(spectrum))
1058
+ noise_level_start = sensitivity * np.std(second_dif[3:50])
1059
+ noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
1060
+ slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
1061
+ noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
1062
+ return second_dif, noise_level
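+ # In brief, with s the smoothed spectrum, the stencil above computes
+ # second_dif[i] = s[i+3] - 2*s[i] + s[i-3], a central second difference with a
+ # three-channel stride. A minimal self-contained sketch of the same operation:
+ # >>> s = np.sin(np.linspace(0., 3., 100))
+ # >>> d2 = np.roll(s, -3) - 2. * s + np.roll(s, 3) # ~ 9 * h**2 * s'' for step h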
826
1063
 
827
- background_fit_end = energy_scale[-1]
828
- for key in region_tags:
829
- end = region_tags[key]['start_x'] + region_tags[key]['width_x']
830
1064
 
831
- startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
832
- endx = np.searchsorted(energy_scale, end)
1065
+ def find_edges(dataset: sidpy.Dataset, sensitivity: float = 2.5) -> list:
1066
+ """find edges within a sidpy.Dataset"""
833
1067
 
834
- if key == 'fit_area':
835
- mask[0:startx] = 0.0
836
- mask[endx:-1] = 0.0
837
- else:
838
- mask[startx:endx] = 0.0
839
- if region_tags[key]['start_x'] < background_fit_end: # Which is the onset of the first edge?
840
- background_fit_end = region_tags[key]['start_x']
1068
+ dim = dataset.get_spectral_dims()
1069
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
841
1070
 
842
- ########################
843
- # Background Fit
844
- ########################
845
- bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
846
- background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
1071
+ second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
847
1072
 
848
- #######################
849
- # Edge Fit
850
- #######################
851
- x = energy_scale
852
- blurred = gaussian_filter(spectrum, sigma=5)
1073
+ [indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
853
1074
 
854
- y = blurred # now in probability
855
- y[np.where(y < 1e-8)] = 1e-8
1075
+ peaks['peak_positions'] = energy_scale[indices]
1076
+ peaks['peak_indices'] = indices
1077
+ edge_energies = [energy_scale[50]]
1078
+ edge_indices = []
856
1079
 
857
- xsec = []
858
- number_of_edges = 0
859
- for key in edges:
860
- if key.isdigit():
861
- xsec.append(edges[key]['data'])
862
- number_of_edges += 1
863
- xsec = np.array(xsec)
1080
+ [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
1081
+ minima = energy_scale[indices]
864
1082
 
865
- def model(xx, pp):
866
- yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
867
- for i in range(number_of_edges):
868
- pp[i] = np.abs(pp[i])
869
- yy = yy + pp[i] * xsec[i, :]
870
- return yy
1083
+ for peak_number in range(len(peaks['peak_positions'])):
1084
+ position = peaks['peak_positions'][peak_number]
1085
+ if position - edge_energies[-1] > 20:
1086
+ impossible = minima[minima < position]
1087
+ impossible = impossible[impossible > position - 5]
1088
+ if len(impossible) == 0:
1089
+ possible = minima[minima > position]
1090
+ possible = possible[possible < position + 5]
1091
+ if len(possible) > 0:
1092
+ edge_energies.append((position + possible[0])/2)
1093
+ edge_indices.append(np.searchsorted(energy_scale, (position + possible[0])/2))
871
1094
 
872
- def residuals(pp, xx, yy):
873
- err = np.abs((yy - model(xx, pp)) * mask) # / np.sqrt(np.abs(y))
874
- return err
1095
+ selected_edges = []
1096
+ for peak in edge_indices:
1097
+ if 525 < energy_scale[peak] < 533:
1098
+ selected_edges.append('O-K1')
1099
+ else:
1100
+ selected_edge = ''
1101
+ edges = find_all_edges(energy_scale[peak], 20, major_edges_only=True)
1102
+ edges = edges.split('\n')
1103
+ minimum_dist = 100.
1104
+ for edge in edges[1:]:
1105
+ edge = edge[:-3].split(':')
1106
+ name = edge[0].strip()
1107
+ energy = float(edge[1].strip())
1108
+ if np.abs(energy - energy_scale[peak]) < minimum_dist:
1109
+ minimum_dist = np.abs(energy - energy_scale[peak])
1110
+ selected_edge = name
875
1111
 
876
- scale = y[100]
877
- pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
878
- [p, _] = leastsq(residuals, pin, args=(x, y))
1112
+ if selected_edge != '':
1113
+ selected_edges.append(selected_edge)
879
1114
 
880
- for key in edges:
881
- if key.isdigit():
882
- edges[key]['areal_density'] = p[int(key) - 1]
1115
+ return selected_edges
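+ # Illustrative usage (assumes `dataset` is a core-loss sidpy.Dataset with a
+ # calibrated energy axis); returns edge labels such as 'O-K1':
+ # >>> candidate_edges = find_edges(dataset, sensitivity=2.5)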
883
1116
 
884
- edges['model'] = {}
885
- edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
886
- edges['model']['background-poly_0'] = p[6]
887
- edges['model']['background-poly_1'] = p[7]
888
- edges['model']['background-poly_2'] = p[8]
889
- edges['model']['background-A'] = A
890
- edges['model']['background-r'] = r
891
- edges['model']['spectrum'] = model(x, p)
892
- edges['model']['blurred'] = blurred
893
- edges['model']['mask'] = mask
894
- edges['model']['fit_parameter'] = p
895
- edges['model']['fit_area_start'] = region_tags['fit_area']['start_x']
896
- edges['model']['fit_area_end'] = region_tags['fit_area']['start_x'] + region_tags['fit_area']['width_x']
897
1117
 
898
- return edges
1118
+ def assign_likely_edges(edge_channels: Union[list, np.ndarray], energy_scale: np.ndarray):
+ """Assigns the most likely major edge (and its minor edges) to each detected edge channel"""
1119
+ edges_in_list = []
1120
+ result = {}
1121
+ for channel in edge_channels:
1122
+ if channel not in edge_channels[edges_in_list]:
1123
+ shift = 5
1124
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
1125
+ while len(element_list) < 1:
1126
+ shift += 1
1127
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
899
1128
 
1129
+ if len(element_list) > 1:
1130
+ while len(element_list) > 0:
1131
+ shift -= 1
1132
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
1133
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift+1, major_edges_only=True)
1134
+ element = (element_list[:4]).strip()
1135
+ z = get_z(element)
1136
+ result[element] = []
1137
+ _, edge_list = list_all_edges(z)
900
1138
 
901
- def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
902
- """find peaks in spectrum"""
1139
+ for peak in edge_list:
1140
+ for edge in edge_list[peak]:
1141
+ possible_minor_edge = np.argmin(np.abs(energy_scale[edge_channels]-edge_list[peak][edge]))
1142
+ if np.abs(energy_scale[edge_channels[possible_minor_edge]]-edge_list[peak][edge]) < 3:
1143
1144
+ edges_in_list.append(possible_minor_edge)
1145
+
1146
+ result[element].append(edge)
1147
+
1148
+ return result
903
1149
 
904
- if dataset.data_type.name == 'SPECTRAL_IMAGE':
905
- spectrum = dataset.view.get_spectrum()
906
- else:
907
- spectrum = np.array(dataset)
908
1150
 
909
- spec_dim = ft.get_dimensions_by_type('SPECTRAL', dataset)[0]
910
- energy_scale = np.array(spec_dim[1])
1151
+ def auto_id_edges(dataset):
+ """Automatically identifies the elements whose edges appear in a core-loss dataset"""
1152
+ edge_channels = identify_edges(dataset)
1153
+ dim = dataset.get_spectral_dims()
1154
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1155
+ found_edges = assign_likely_edges(edge_channels, energy_scale)
1156
+ return found_edges
911
1157
 
912
- second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
913
- [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
914
1158
 
915
- start_channel = np.searchsorted(energy_scale, fit_start)
916
- end_channel = np.searchsorted(energy_scale, fit_end)
917
- peaks = []
918
- for index in indices:
919
- if start_channel < index < end_channel:
920
- peaks.append(index - start_channel)
1159
+ def identify_edges(dataset: sidpy.Dataset, noise_level: float = 2.0):
1160
+ """
1161
+ Using first derivative to determine edge onsets
1162
+ Any peak in first derivative higher than noise_level times standard deviation will be considered
1163
+
1164
+ Parameters
1165
+ ----------
1166
+ dataset: sidpy.Dataset
1167
+ the spectrum
1168
+ noise_level: float
1169
+ this number times the standard deviation of the first derivative decides whether an edge onset is significant
1170
+
1171
+ Return
1172
+ ------
1173
+ edge_channel: numpy.ndarray
1174
+
1175
+ """
1176
+ dim = dataset.get_spectral_dims()
1177
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1178
+ dispersion = get_slope(energy_scale)
1179
+ spec = scipy.ndimage.gaussian_filter(dataset, 3/dispersion) # smooth with a 3 eV wide Gaussian
921
1180
 
922
- if 'model' in dataset.metadata:
923
- model = dataset.metadata['model'][start_channel:end_channel]
1181
+ first_derivative = spec - np.roll(spec, +2)
1182
+ first_derivative[:3] = 0
1183
+ first_derivative[-3:] = 0
924
1184
 
925
- elif energy_scale[0] > 0:
926
- if 'edges' not in dataset.metadata:
927
- return
928
- if 'model' not in dataset.metadata['edges']:
929
- return
930
- model = dataset.metadata['edges']['model']['spectrum'][start_channel:end_channel]
1185
+ # find if there is a strong edge at high energy_scale
1186
+ noise_level = noise_level*np.std(first_derivative[3:50])
1187
+ [edge_channels, _] = scipy.signal.find_peaks(first_derivative, noise_level)
1188
+
1189
+ return edge_channels
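+ # Illustrative usage: the returned channels index the energy axis, so the
+ # corresponding onset energies can be read off directly.
+ # >>> channels = identify_edges(dataset, noise_level=2.0)
+ # >>> onsets = dataset.get_spectral_dims(return_axis=True)[0].values[channels]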
931
1190
 
932
- else:
933
- model = np.zeros(end_channel - start_channel)
934
1191
 
935
- energy_scale = energy_scale[start_channel:end_channel]
1192
+ def add_element_to_dataset(dataset: sidpy.Dataset, z: Union[int, str]):
1193
+ """
1194
+ """
1195
+ # We check whether this element is already in the edges dictionary
1196
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
936
1197
 
937
- difference = np.array(spectrum)[start_channel:end_channel] - model
938
- fit = np.zeros(len(energy_scale))
939
- p_out = []
940
- if len(peaks) > 0:
941
- p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
942
- [p_out, _] = scipy.optimize.leastsq(residuals_smooth, p_in, ftol=1e-3, args=(energy_scale,
943
- difference,
944
- False))
945
- fit = fit + model_smooth(energy_scale, p_out, False)
1198
+ zz = get_z(z)
1199
+ if 'edges' not in dataset.metadata:
1200
+ dataset.metadata['edges'] = {'model': {}, 'use_low_loss': False}
1201
+ index = 0
1202
+ for key, edge in dataset.metadata['edges'].items():
1203
+ if key.isdigit():
1204
+ index += 1
1205
+ if 'z' in edge:
1206
+ if zz == edge['z']:
1207
+ index = int(key)
1208
+ break
946
1209
 
947
- peak_model = np.zeros(len(spec_dim[1]))
948
- peak_model[start_channel:end_channel] = fit
1210
+ major_edge = ''
1211
+ minor_edge = ''
1212
+ all_edges = {}
1213
+ x_section = get_x_sections(zz)
1214
+ edge_start = 10 # int(15./ft.get_slope(self.energy_scale)+0.5)
1215
+ for key in x_section:
1216
+ if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():
1217
+ if energy_scale[edge_start] < x_section[key]['onset'] < energy_scale[-edge_start]:
1218
+ if key in ['K1', 'L3', 'M5', 'M3']:
1219
+ major_edge = key
1220
+
1221
+ all_edges[key] = {'onset': x_section[key]['onset']}
949
1222
 
950
- return peak_model, p_out
1223
+ if major_edge != '':
1224
+ key = major_edge
1225
+ elif minor_edge != '':
1226
+ key = minor_edge
1227
+ else:
1228
+ print(f'Could not find an edge of element {zz} in the spectrum')
1229
+ return False
1230
+
1231
+
1232
+ if str(index) not in dataset.metadata['edges']:
1233
+ dataset.metadata['edges'][str(index)] = {}
951
1234
 
1235
+ start_exclude = x_section[key]['onset'] - x_section[key]['excl before']
1236
+ end_exclude = x_section[key]['onset'] + x_section[key]['excl after']
952
1237
 
953
- def find_maxima(y, number_of_peaks):
954
- """ find the first most prominent peaks
1238
+ dataset.metadata['edges'][str(index)] = {'z': zz, 'symmetry': key, 'element': elements[zz],
1239
+ 'onset': x_section[key]['onset'], 'end_exclude': end_exclude,
1240
+ 'start_exclude': start_exclude}
1241
+ dataset.metadata['edges'][str(index)]['all_edges'] = all_edges
1242
+ dataset.metadata['edges'][str(index)]['chemical_shift'] = 0.0
1243
+ dataset.metadata['edges'][str(index)]['areal_density'] = 0.0
1244
+ dataset.metadata['edges'][str(index)]['original_onset'] = dataset.metadata['edges'][str(index)]['onset']
1245
+ return True
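+ # Illustrative usage: elements can be registered by symbol or atomic number
+ # before quantification; both address the same metadata entry.
+ # >>> add_element_to_dataset(dataset, 'Fe')
+ # >>> add_element_to_dataset(dataset, 26)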
955
1246
 
956
- peaks are then sorted by energy
1247
+
1248
+ def make_edges(edges_present: list, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray = None) -> dict:
1249
+ """Makes the edges dictionary for quantification
957
1250
 
958
1251
  Parameters
959
1252
  ----------
960
- y: numpy array
961
- (part) of spectrum
962
- number_of_peaks: int
1253
+ edges_present: list
1254
+ list of edges
1255
+ energy_scale: numpy array
1256
+ energy scale on which to make cross-section
1257
+ e_0: float
1258
+ acceleration voltage (in V)
1259
+ coll_angle: float
1260
+ collection angle in mrad
1261
+ low_loss: numpy array with same length as energy_scale
1262
+ low-loss spectrum with which to convolve the cross-section (default=None)
963
1263
 
964
1264
  Returns
965
1265
  -------
966
- numpy array
967
- indices of peaks
1266
+ edges: dict
1267
+ dictionary with all information on cross-section
968
1268
  """
969
- blurred2 = gaussian_filter(y, sigma=2)
970
- peaks, _ = scipy.signal.find_peaks(blurred2)
971
- prominences = peak_prominences(blurred2, peaks)[0]
972
- prominences_sorted = np.argsort(prominences)
973
- peaks = peaks[prominences_sorted[-number_of_peaks:]]
1269
+ x_sections = get_x_sections()
1270
+ edges = {}
1271
+ for i, edge in enumerate(edges_present):
1272
+ element, symmetry = edge.split('-')
1273
+ z = 0
1274
+ for key in x_sections:
1275
+ if element == x_sections[key]['name']:
1276
+ z = int(key)
1277
+ edges[i] = {}
1278
+ edges[i]['z'] = z
1279
+ edges[i]['symmetry'] = symmetry
1280
+ edges[i]['element'] = element
974
1281
 
975
- peak_indices = np.argsort(peaks)
976
- return peaks[peak_indices]
1282
+ for key in edges:
1283
+ xsec = x_sections[str(edges[key]['z'])]
1284
+ if 'chemical_shift' not in edges[key]:
1285
+ edges[key]['chemical_shift'] = 0
1286
+ if 'symmetry' not in edges[key]:
1287
+ edges[key]['symmetry'] = 'K1'
1288
+ if 'K' in edges[key]['symmetry']:
1289
+ edges[key]['symmetry'] = 'K1'
1290
+ elif 'L' in edges[key]['symmetry']:
1291
+ edges[key]['symmetry'] = 'L3'
1292
+ elif 'M' in edges[key]['symmetry']:
1293
+ edges[key]['symmetry'] = 'M5'
1294
+ else:
1295
+ edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
977
1296
 
1297
+ edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
1298
+ edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
1299
+ edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
1300
+ edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
978
1301
 
979
- def gauss(x, p): # p[0]==mean, p[1]= amplitude p[2]==fwhm,
980
- """Gaussian Function
1302
+ edges = make_cross_sections(edges, energy_scale, e_0, coll_angle, low_loss)
981
1303
 
982
- p[0]==mean, p[1]= amplitude p[2]==fwhm
983
- area = np.sqrt(2* np.pi)* p[1] * np.abs(p[2] / 2.3548)
984
- FWHM = 2 * np.sqrt(2 np.log(2)) * sigma = 2.3548 * sigma
985
- sigma = FWHM/3548
986
- """
987
- if p[2] == 0:
988
- return x * 0.
989
- else:
990
- return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
991
-
992
-
993
- def lorentz(x, p):
994
- """lorentzian function"""
995
- lorentz_peak = 0.5 * p[2] / np.pi / ((x - p[0]) ** 2 + (p[2] / 2) ** 2)
996
- return p[1] * lorentz_peak / lorentz_peak.max()
997
-
998
-
999
- def zl(x, p, p_zl):
1000
- """zero-loss function"""
1001
- p_zl_local = p_zl.copy()
1002
- p_zl_local[2] += p[0]
1003
- p_zl_local[5] += p[0]
1004
- zero_loss = zl_func(p_zl_local, x)
1005
- return p[1] * zero_loss / zero_loss.max()
1006
-
1007
-
1008
- def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
1009
- """ model for fitting low-loss spectrum"""
1010
- if pin is None:
1011
- pin = p
1012
-
1013
- # if len([restrict_pos]) == 1:
1014
- # restrict_pos = [restrict_pos]*number_of_peaks
1015
- # if len([restrict_width]) == 1:
1016
- # restrict_width = [restrict_width]*number_of_peaks
1017
- y = np.zeros(len(x))
1304
+ return edges
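+ # Illustrative call (values are placeholders): cross-sections for two edges on
+ # a given energy axis, for 200 kV and a 30 mrad effective collection angle.
+ # >>> energy_scale = np.arange(100., 1000., 0.5)
+ # >>> edges = make_edges(['Fe-L3', 'O-K1'], energy_scale, 200000., 30.)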
1018
1305
 
1019
- for i in range(number_of_peaks):
1020
- index = int(i * 3)
1021
- if restrict_pos > 0:
1022
- if p[index] > pin[index] * (1.0 + restrict_pos):
1023
- p[index] = pin[index] * (1.0 + restrict_pos)
1024
- if p[index] < pin[index] * (1.0 - restrict_pos):
1025
- p[index] = pin[index] * (1.0 - restrict_pos)
1306
+ def fit_dataset(dataset: sidpy.Dataset):
+ """Fits background and cross-sections to a dataset and prints the relative composition"""
1307
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1308
+ if 'fit_area' not in dataset.metadata['edges']:
1309
+ dataset.metadata['edges']['fit_area'] = {}
1310
+ if 'fit_start' not in dataset.metadata['edges']['fit_area']:
1311
+ dataset.metadata['edges']['fit_area']['fit_start'] = energy_scale[50]
1312
+ if 'fit_end' not in dataset.metadata['edges']['fit_area']:
1313
+ dataset.metadata['edges']['fit_area']['fit_end'] = energy_scale[-2]
1314
+ dataset.metadata['edges']['use_low_loss'] = False
1315
+
1316
+ if 'experiment' in dataset.metadata:
1317
+ exp = dataset.metadata['experiment']
1318
+ if 'convergence_angle' not in exp:
1319
+ raise ValueError('need a convergence_angle in experiment of metadata dictionary ')
1320
+ alpha = exp['convergence_angle']
1321
+ beta = exp['collection_angle']
1322
+ beam_kv = exp['acceleration_voltage']
1323
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1324
+ eff_beta = effective_collection_angle(energy_scale, alpha, beta, beam_kv)
1325
+ edges = make_cross_sections(dataset.metadata['edges'], np.array(energy_scale), beam_kv, eff_beta)
1326
+ dataset.metadata['edges'] = fit_edges2(dataset, energy_scale, edges)
1327
+ areal_density = []
1328
+ elements = []
1329
+ for key in edges:
1330
+ if key.isdigit(): # only edges have numbers in that dictionary
1331
+ elements.append(edges[key]['element'])
1332
+ areal_density.append(edges[key]['areal_density'])
1333
+ areal_density = np.array(areal_density)
1334
+ out_string = '\nRelative composition: \n'
1335
+ for i, element in enumerate(elements):
1336
+ out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '
1026
1337
 
1027
- p[index + 1] = abs(p[index + 1])
1028
- # print(p[index + 1])
1029
- p[index + 2] = abs(p[index + 2])
1030
- if restrict_width > 0:
1031
- if p[index + 2] > pin[index + 2] * (1.0 + restrict_width):
1032
- p[index + 2] = pin[index + 2] * (1.0 + restrict_width)
1338
+ print(out_string)
1033
1339
 
1034
- if peak_shape[i] == 'Lorentzian':
1035
- y = y + lorentz(x, p[index:])
1036
- elif peak_shape[i] == 'zl':
1037
1340
 
1038
- y = y + zl(x, p[index:], p_zl)
1039
- else:
1040
- y = y + gauss(x, p[index:])
1041
- return y
1341
+ def auto_chemical_composition(dataset: sidpy.Dataset) -> None:
+ """Convenience workflow: auto-identify edges, add the elements, and fit the dataset"""
1042
1342
 
1343
+ found_edges = auto_id_edges(dataset)
1344
+ for key in found_edges:
1345
+ add_element_to_dataset(dataset, key)
1346
+ fit_dataset(dataset)
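+ # Minimal end-to-end sketch, assuming dataset.metadata['experiment'] already
+ # provides 'convergence_angle' and 'collection_angle' (mrad) and
+ # 'acceleration_voltage' (V), which fit_dataset requires:
+ # >>> auto_chemical_composition(dataset) # prints the relative composition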
1043
1347
 
1044
- def sort_peaks(p, peak_shape):
1045
- """sort fitting parameters by peak position"""
1046
- number_of_peaks = int(len(p) / 3)
1047
- p3 = np.reshape(p, (number_of_peaks, 3))
1048
- sort_pin = np.argsort(p3[:, 0])
1049
1348
 
1050
- p = p3[sort_pin].flatten()
1051
- peak_shape = np.array(peak_shape)[sort_pin].tolist()
1349
+ def make_cross_sections(edges: dict, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray = None) -> dict:
1350
+ """Updates the edges dictionary with collection angle-integrated X-ray photo-absorption cross-sections
1052
1351
 
1053
- return p, peak_shape
1352
+ """
1353
+ for key in edges:
1354
+ if str(key).isdigit():
1355
+ edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
1356
+ edges[key]['chemical_shift']) / 1e10 # from barns to 1/nm^2
1357
+ if low_loss is not None:
1358
+ low_loss = np.roll(np.array(low_loss), 1024 - np.argmax(np.array(low_loss))) # center the zero-loss peak at channel 1024
1359
+ edges[key]['data'] = scipy.signal.convolve(edges[key]['data'], low_loss/low_loss.sum(), mode='same')
1054
1360
 
1361
+ edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
1362
+ edges[key]['X_section_type'] = 'XRPA'
1363
+ edges[key]['X_section_source'] = 'pyTEMlib'
1055
1364
 
1056
- def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
1057
- """ add peaks to fitting parameters"""
1058
- if pin_in is None:
1059
- return
1060
- if peak_shape_in is None:
1061
- return
1365
+ return edges
1062
1366
 
1063
- pin = pin_in.copy()
1064
1367
 
1065
- peak_shape = peak_shape_in.copy()
1066
- if isinstance(shape, str): # if peak_shape is only a string make a list of it.
1067
- shape = [shape]
1368
+ def power_law(energy: np.ndarray, a: float, r: float) -> np.ndarray:
1369
+ """power law for power_law_background"""
1370
+ return a * np.power(energy, -r)
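+ # Worked example of the background model I(E) = a * E**(-r):
+ # >>> energy = np.linspace(200., 800., 601)
+ # >>> bgd = power_law(energy, 1.0e10, 3.0) # decays as E**-3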
1068
1371
 
1069
- if len(shape) == 1:
1070
- shape = shape * len(peaks)
1071
- for i, peak in enumerate(peaks):
1072
- pin.append(x[peak])
1073
- pin.append(y[peak])
1074
- pin.append(.3)
1075
- peak_shape.append(shape[i])
1076
1372
 
1077
- return pin, peak_shape
1373
+ def power_law_background(spectrum: np.ndarray, energy_scale: np.ndarray, fit_area: list, verbose: bool = False):
1374
+ """fit of power law to spectrum """
1078
1375
 
1376
+ # Determine energy window for background fit in pixels
1377
+ startx = np.searchsorted(energy_scale, fit_area[0])
1378
+ endx = np.searchsorted(energy_scale, fit_area[1])
1079
1379
 
1080
- def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
1081
- """model for fitting low-loss spectrum"""
1380
+ x = np.array(energy_scale)[startx:endx]
1381
+ y = np.array(spectrum)[startx:endx].flatten()
1082
1382
 
1083
- pin_original = pin.copy()
1383
+ # Initial values of parameters
1384
+ p0 = np.array([1.0E+20, 3])
1084
1385
 
1085
- def residuals3(pp, xx, yy):
1086
- err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
1087
- restrict_width)) / np.sqrt(np.abs(yy))
1386
+ # background fitting
1387
+ def bgdfit(pp, yy, xx):
1388
+ err = yy - power_law(xx, pp[0], pp[1])
1088
1389
  return err
1089
1390
 
1090
- [p, _] = leastsq(residuals3, pin, args=(x, y))
1091
- # p2 = p.tolist()
1092
- # p3 = np.reshape(p2, (number_of_peaks, 3))
1093
- # sort_pin = np.argsort(p3[:, 0])
1094
-
1095
- # p = p3[sort_pin].flatten()
1096
- # peak_shape = np.array(peak_shape)[sort_pin].tolist()
1097
-
1098
- return p, peak_shape
1391
+ [p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
1099
1392
 
1393
+ background_difference = y - power_law(x, p[0], p[1])
1394
+ background_noise_level = std_dev = np.std(background_difference)
1395
+ if verbose:
1396
+ print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
1397
+ print(background_difference.max() / background_noise_level)
1100
1398
 
1101
- def fix_energy_scale(spec, energy=None):
1102
- """Shift energy scale according to zero-loss peak position
1103
-
1104
- This function assumes that the fzero loss peak is the maximum of the spectrum.
1105
- """
1399
+ print(f'Noise level in spectrum {std_dev:.3f} counts')
1106
1400
 
1107
- # determine start and end fitting region in pixels
1108
- if isinstance(spec, sidpy.Dataset):
1109
- if energy is None:
1110
- energy = spec.energy_loss.values
1111
- spec = np.array(spec)
1112
-
1113
- else:
1114
- if energy is None:
1115
- return
1116
- if not isinstance(spec, np.ndarray):
1117
- return
1118
-
1119
- start = np.searchsorted(np.array(energy), -10)
1120
- end = np.searchsorted(np.array(energy), 10)
1121
- startx = np.argmax(spec[start:end]) + start
1401
+ # Calculate background over the whole energy scale
1402
+ background = power_law(energy_scale, p[0], p[1])
1403
+ return background, p
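+ # Illustrative usage (array names are placeholders): fit_area brackets a
+ # pre-edge window in eV, and the returned background spans the full axis.
+ # >>> background, (a, r) = power_law_background(spectrum, energy_scale,
+ # ... fit_area=[150., 280.], verbose=True)
+ # >>> signal = spectrum - background # background-subtracted spectrum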
1122
1404
 
1123
- end = startx + 3
1124
- start = startx - 3
1125
- for i in range(10):
1126
- if spec[startx - i] < 0.3 * spec[startx]:
1127
- start = startx - i
1128
- if spec[startx + i] < 0.3 * spec[startx]:
1129
- end = startx + i
1130
- if end - start < 3:
1131
- end = startx + 2
1132
- start = startx - 2
1133
1405
 
1134
- x = np.array(energy[int(start):int(end)])
1135
- y = np.array(spec[int(start):int(end)]).copy()
1406
+ def cl_model(x, p, number_of_edges, xsec):
1407
+ """ core loss model for fitting"""
1408
+ y = (p[9] * np.power(x, (-p[10]))) + p[7] * x + p[8] * x * x
1409
+ for i in range(number_of_edges):
1410
+ y = y + p[i] * xsec[i, :]
1411
+ return y
1136
1412
 
1137
- y[np.nonzero(y <= 0)] = 1e-12
1138
1413
 
1139
- p0 = [energy[startx], 1000.0, (energy[end] - energy[start]) / 3.] # Initial guess is a normal distribution
1414
+ def fit_edges2(spectrum, energy_scale, edges):
1415
+ """fit edges for quantification"""
1140
1416
 
1141
- def errfunc(pp, xx, yy):
1142
- return (gauss(xx, pp) - yy) / np.sqrt(yy) # Distance to the target function
1417
+ dispersion = energy_scale[1] - energy_scale[0]
1418
+ # Determine fitting ranges and masks to exclude ranges
1419
+ mask = np.ones(len(spectrum))
1143
1420
 
1144
- [p1, _] = leastsq(errfunc, np.array(p0[:]), args=(x, y))
1145
- fit_mu, area, fwhm = p1
1421
+ background_fit_start = edges['fit_area']['fit_start']
1422
+ if edges['fit_area']['fit_end'] > energy_scale[-1]:
1423
+ edges['fit_area']['fit_end'] = energy_scale[-1]
1424
+ background_fit_end = edges['fit_area']['fit_end']
1146
1425
 
1147
- return fwhm, fit_mu
1426
+ startx = np.searchsorted(energy_scale, background_fit_start)
1427
+ endx = np.searchsorted(energy_scale, background_fit_end)
1428
+ mask[0:startx] = 0.0
1429
+ mask[endx:-1] = 0.0
1430
+ for key in edges:
1431
+ if key.isdigit():
1432
+ if edges[key]['start_exclude'] > background_fit_start + dispersion:
1433
+ if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
1434
+ if edges[key]['end_exclude'] > background_fit_end - dispersion:
1435
+ # we need at least one channel to fit.
1436
+ edges[key]['end_exclude'] = background_fit_end - dispersion
1437
+ startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
1438
+ if startx < 2:
1439
+ startx = 1
1440
+ endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
1441
+ mask[startx: endx] = 0.0
1148
1442
 
1149
- def resolution_function2(dataset, width =0.3):
1150
- guess = [0.2, 1000, 0.02, 0.2, 1000, 0.2]
1151
- p0 = np.array(guess)
1152
-
1153
- start = np.searchsorted(dataset.energy_loss, -width / 2.)
1154
- end = np.searchsorted(dataset.energy_loss, width / 2.)
1155
- x = dataset.energy_loss[start:end]
1156
- y = np.array(dataset)[start:end]
1157
- def zl2(pp, yy, xx):
1158
- eerr = (yy - zl_func(pp, xx)) # /np.sqrt(y)
1159
- return eerr
1160
-
1161
- [p_zl, _] = leastsq(zl2, p0, args=(y, x), maxfev=2000)
1443
+ ########################
1444
+ # Background Fit
1445
+ ########################
1446
+ bgd_fit_area = [background_fit_start, background_fit_end]
1447
+ background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
1162
1448
 
1163
- z_loss = zl_func(p_zl, dataset.energy_loss)
1164
- z_loss = dataset.like_data(z_loss)
1165
- z_loss.title = 'resolution_function'
1166
- z_loss.metadata['zero_loss_parameter']=p_zl
1167
-
1168
- dataset.metadata['low_loss']['zero_loss'] = {'zero_loss_parameter': p_zl,
1169
- 'zero_loss_fit': 'Product2Lorentzians'}
1170
- zero_loss = dataset.like_array(z_loss)
1171
- return zero_loss, p_zl
1449
+ #######################
1450
+ # Edge Fit
1451
+ #######################
1452
+ x = energy_scale
1453
+ blurred = gaussian_filter(spectrum, sigma=5)
1172
1454
 
1455
+ y = blurred # now in probability
1456
+ y[np.where(y < 1e-8)] = 1e-8
1173
1457
 
1458
+ xsec = []
1459
+ number_of_edges = 0
1460
+ for key in edges:
1461
+ if key.isdigit():
1462
+ xsec.append(edges[key]['data'])
1463
+ number_of_edges += 1
1464
+ xsec = np.array(xsec)
1174
1465
 
1175
- def resolution_function(energy_scale, spectrum, width, verbose=False):
1176
- """get resolution function (zero-loss peak shape) from low-loss spectrum"""
1177
1466
 
1178
- guess = [0.2, 1000, 0.02, 0.2, 1000, 0.2]
1179
- p0 = np.array(guess)
1467
+ def model(xx, pp):
1468
+ yy = pp[0] * np.power(xx, -pp[1]) + pp[2] + pp[3] * xx + pp[4] * xx * xx
1469
+ for i in range(number_of_edges):
1470
+ pp[i+5] = np.abs(pp[i+5])
1471
+ yy = yy + pp[i+5] * xsec[i, :]
1472
+ return yy
1180
1473
 
1181
- start = np.searchsorted(energy_scale, -width / 2.)
1182
- end = np.searchsorted(energy_scale, width / 2.)
1183
- x = energy_scale[start:end]
1184
- y = spectrum[start:end]
1474
+ def residuals(pp, xx, yy):
1475
+ err = np.abs((yy - model(xx, pp)) * mask) # / np.sqrt(np.abs(y))
1476
+ return err
1185
1477
 
1186
- def zl2(pp, yy, xx):
1187
- eerr = (yy - zl_func(pp, xx)) # /np.sqrt(y)
1188
- return eerr
1478
+ scale = y[100]
1479
+ # fit parameter layout: p[0]=A, p[1]=r (power law), p[2:5] polynomial, p[5:] areal densities
+ pin = np.array([A, r, 10., 1., 0.00] + [scale / 5] * number_of_edges)
1480
+ [p, _] = leastsq(residuals, pin, args=(x, y))
1189
1481
 
1190
- def zl_restrict(pp, yy, xx):
1482
+ for key in edges:
1483
+ if key.isdigit():
1484
+ edges[key]['areal_density'] = p[int(key)+5]
1191
1485
 
1192
- if pp[2] > xx[-1] * .8:
1193
- pp[2] = xx[-1] * .8
1194
- if pp[2] < xx[0] * .8:
1195
- pp[2] = xx[0] * .8
1486
+ edges['model'] = {}
1487
+ edges['model']['background'] = (p[0] * np.power(x, -p[1]) + p[2] + p[3] * x + p[4] * x * x)
1488
+ edges['model']['background-poly_0'] = p[2]
1489
+ edges['model']['background-poly_1'] = p[3]
1490
+ edges['model']['background-poly_2'] = p[4]
1491
+ edges['model']['background-A'] = p[0]
1492
+ edges['model']['background-r'] = p[1]
1493
+ edges['model']['spectrum'] = model(x, p)
1494
+ edges['model']['blurred'] = blurred
1495
+ edges['model']['mask'] = mask
1496
+ edges['model']['fit_parameter'] = p
1497
+ edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
1498
+ edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
1196
1499
 
1197
- if pp[5] > xx[-1] * .8:
1198
- pp[5] = xx[-1] * .8
1199
- if pp[5] < x[0] * .8:
1200
- pp[5] = xx[0] * .8
1500
+ return edges
1201
1501
 
1202
- if len(pp) > 6:
1203
- pp[7] = abs(pp[7])
1204
- if abs(pp[7]) > (pp[1] + pp[4]) / 10:
1205
- pp[7] = abs(pp[1] + pp[4]) / 10
1206
- if abs(pp[8]) > 1:
1207
- pp[8] = pp[8] / abs(pp[8])
1208
- pp[6] = abs(pp[6])
1209
- pp[9] = abs(pp[9])
1210
1502
 
1211
- pp[0] = abs(pp[0])
1212
- pp[3] = abs(pp[3])
1213
- if pp[0] > (xx[-1] - xx[0]) / 2.0:
1214
- pp[0] = xx[-1] - xx[0] / 2.0
1215
- if pp[3] > (xx[-1] - xx[0]) / 2.0:
1216
- pp[3] = xx[-1] - xx[0] / 2.0
1503
+ def fit_edges(spectrum, energy_scale, region_tags, edges):
1504
+ """fit edges for quantification"""
1217
1505
 
1218
- yy[yy < 0] = 0. # no negative numbers in sqrt below
1219
- eerr = (yy - zl_func(pp, xx)) / np.sqrt(yy)
1506
+ # Determine fitting ranges and masks to exclude ranges
1507
+ mask = np.ones(len(spectrum))
1220
1508
 
1221
- return eerr
1222
-
1223
- [p_zl, _] = leastsq(zl2, p0, args=(y, x), maxfev=2000)
1224
- if verbose:
1225
- print('Fit of a Product of two Lorentzian')
1226
- print('Positions: ', p_zl[2], p_zl[5], 'Distance: ', p_zl[2] - p_zl[5])
1227
- print('Width: ', p_zl[0], p_zl[3])
1228
- print('Areas: ', p_zl[1], p_zl[4])
1229
- err = (y - zl_func(p_zl, x)) / np.sqrt(y)
1230
- print(f'Goodness of Fit: {sum(err ** 2) / len(y) / sum(y) * 1e2:.5}%')
1231
-
1232
- z_loss = zl_func(p_zl, energy_scale)
1233
-
1234
- return z_loss, p_zl
1235
-
1236
-
1237
- def get_energy_shifts(spectrum_image, energy_scale=None, zero_loss_fit_width=0.3):
1238
- """ get shift of spectrum from zero-loss peak position
1239
- better to use get resolution_functions
1240
- """
1241
- resolution_functions = get_resolution_functions(spectrum_image, energy_scale=energy_scale, zero_loss_fit_width=zero_loss_fit_width)
1242
- return resolution_functions.metadata['low_loss']['shifts'], resolution_functions.metadata['low_loss']['widths']
1243
-
1244
- def get_resolution_functions(spectrum_image, energy_scale=None, zero_loss_fit_width=0.3):
1245
- """get resolution_function and shift of spectra form zero-loss peak position"""
1246
- if isinstance(spectrum_image, sidpy.Dataset):
1247
- energy_dimension = spectrum_image.get_dimensions_by_type('spectral')
1248
- if len(energy_dimension) != 1:
1249
- raise TypeError('Dataset needs to have exactly one spectral dimension to analyze zero-loss peak')
1250
- energy_dimension = spectrum_image.get_dimension_by_number(energy_dimension)[0]
1251
- energy_scale = energy_dimension.values
1252
- spatial_dimension = spectrum_image.get_dimensions_by_type('spatial')
1253
- if len(spatial_dimension) == 0:
1254
- fwhm, delta_e = fix_energy_scale(spectrum_image)
1255
- z_loss, p_zl = resolution_function(energy_scale - delta_e, spectrum_image, zero_loss_fit_width)
1256
- fwhm2, delta_e2 = fix_energy_scale(z_loss, energy_scale - delta_e)
1257
- return delta_e + delta_e2, fwhm2
1258
- elif len(spatial_dimension) != 2:
1259
- return
1260
- shifts = np.zeros(spectrum_image.shape[0:2])
1261
- widths = np.zeros(spectrum_image.shape[0:2])
1262
- resolution_functions = spectrum_image.copy()
1263
- for x in range(spectrum_image.shape[0]):
1264
- for y in range(spectrum_image.shape[1]):
1265
- spectrum = np.array(spectrum_image[x, y])
1266
- fwhm, delta_e = fix_energy_scale(spectrum, energy_scale)
1267
- z_loss, p_zl = resolution_function(energy_scale - delta_e, spectrum, zero_loss_fit_width)
1268
- resolution_functions[x, y] = z_loss
1269
- fwhm2, delta_e2 = fix_energy_scale(z_loss, energy_scale - delta_e)
1270
- shifts[x, y] = delta_e + delta_e2
1271
- widths[x,y] = fwhm2
1272
-
1273
- resolution_functions.metadata['low_loss'] = {'shifts': shifts,
1274
- 'widths': widths}
1275
- return resolution_functions
1276
-
1277
-
1278
- def shift_on_same_scale(spectrum_image, shifts=None, energy_scale=None, master_energy_scale=None):
1279
- """shift spectrum in energy"""
1280
- if isinstance(spectrum_image, sidpy.Dataset):
1281
- if shifts is None:
1282
- if 'low_loss' in spectrum_image.metadata:
1283
- if 'shifts' in spectrum_image.metadata['low_loss']:
1284
- shifts = spectrum_image.metadata['low_loss']['shifts']
1285
- else:
1286
- resolution_functions = get_resolution_functions(spectrum_image)
1287
- shifts = resolution_functions.metadata['low_loss']['shifts']
1288
- energy_dimension = spectrum_image.get_dimensions_by_type('spectral')
1289
- if len(energy_dimension) != 1:
1290
- raise TypeError('Dataset needs to have exactly one spectral dimension to analyze zero-loss peak')
1291
- energy_dimension = spectrum_image.get_dimension_by_number(energy_dimension)[0]
1292
- energy_scale = energy_dimension.values
1293
- master_energy_scale = energy_scale.copy()
1294
-
1295
- new_si = spectrum_image.copy()
1296
- new_si *= 0.0
1297
- for x in range(spectrum_image.shape[0]):
1298
- for y in range(spectrum_image.shape[1]):
1299
- tck = interpolate.splrep(np.array(energy_scale - shifts[x, y]), np.array(spectrum_image[x, y]), k=1, s=0)
1300
- new_si[x, y, :] = interpolate.splev(master_energy_scale, tck, der=0)
1301
- return new_si
1302
-
1303
-
1304
- def get_wave_length(e0):
1305
- """get deBroglie wavelength of electron accelerated by energy (in eV) e0"""
1306
-
1307
- ev = constants.e * e0
1308
- return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
1309
-
1310
-
1311
- def drude(ep, eb, gamma, e):
1312
- """dielectric function according to Drude theory"""
1313
-
1314
- eps = 1 - (ep ** 2 - eb * e * 1j) / (e ** 2 + 2 * e * gamma * 1j) # Mod drude term
1315
- return eps
1316
-
1317
-
1318
- def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
1319
- """dielectric function according to Drude-Lorentz theory"""
1320
-
1321
- eps = eps_inf
1322
- for i in range(leng):
1323
- eps = eps + amplitude[i] * (1 / (e + ep[i] + gamma[i] * 1j) - 1 / (e - ep[i] + gamma[i] * 1j))
1324
- return eps
1325
-
1326
-
1327
- def plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, es=0, ibt=[]):
1328
- """Plot loss function """
1329
-
1330
- [x, y] = np.meshgrid(e_data + 1e-12, a_data[1024:2048] * 1000)
1331
-
1332
- z = plotdata
1333
- lev = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 3, 4, 4.9]) * max_p / 5
1334
-
1335
- wavelength = get_wave_length(ee)
1336
- q = a_data[1024:2048] / (wavelength * 1e9) # in [1/nm]
1337
- scale = np.array([0, a_data[-1], e_data[0], e_data[-1]])
1338
- ev2hertz = constants.value('electron volt-hertz relationship')
1339
-
1340
- if units[0] == 'mrad':
1341
- units[0] = 'scattering angle [mrad]'
1342
- scale[1] = scale[1] * 1000.
1343
- light_line = constants.c * a_data # for mrad
1344
- elif units[0] == '1/nm':
1345
- units[0] = 'scattering vector [1/nm]'
1346
- scale[1] = scale[1] / (wavelength * 1e9)
1347
- light_line = 1 / (constants.c / ev2hertz) * 1e-9
1348
-
1349
- if units[1] == 'eV':
1350
- units[1] = 'energy loss [eV]'
1351
-
1352
- if units[2] == 'ppm':
1353
- units[2] = 'probability [ppm]'
1354
- if units[2] == '1/eV':
1355
- units[2] = 'probability [eV$^{-1}$ srad$^{-1}$]'
1356
-
1357
- alpha = 3. / 5. * ef / ep
1358
-
1359
- ax2 = plt.gca()
1360
- fig2 = plt.gcf()
1361
- im = ax2.imshow(z.T, clim=(0, max_p), origin='lower', aspect='auto', extent=scale)
1362
- co = ax2.contour(y, x, z, levels=lev, colors='k', origin='lower')
1363
- # ,extent=(-ang*1000.,ang*1000.,e_data[0],e_data[-1]))#, vmin = p_vol.min(), vmax = 1000)
1364
-
1365
- fig2.colorbar(im, ax=ax2, label=units[2])
1366
-
1367
- ax2.plot(a_data, light_line, c='r', label='light line')
1368
- # ax2.plot(e_data*light_line*np.sqrt(np.real(eps_data)),e_data, color='steelblue',
1369
- # label='$\omega = c q \sqrt{\epsilon_2}$')
1370
-
1371
- # ax2.plot(q, Ep_disp, c='r')
1372
- ax2.plot([11.5 * light_line, 0.12], [11.5, 11.5], c='r')
1373
-
1374
- ax2.text(.05, 11.7, 'surface plasmon', color='r')
1375
- ax2.plot([0.0, 0.12], [16.8, 16.8], c='r')
1376
- ax2.text(.05, 17, 'volume plasmon', color='r')
1377
- ax2.set_xlim(0, scale[1])
1378
- ax2.set_ylim(0, 20)
1379
- # Interband transitions
1380
- ax2.plot([0.0, 0.25], [4.2, 4.2], c='g', label='interband transitions')
1381
- ax2.plot([0.0, 0.25], [5.2, 5.2], c='g')
1382
- ax2.set_ylabel(units[1])
1383
- ax2.set_xlabel(units[0])
1384
- ax2.legend(loc='lower right')
1385
-
1386
-
1387
- def zl_func(p, x):
1388
- """zero-loss peak function"""
1389
-
1390
- p[0] = abs(p[0])
1391
-
1392
- gauss1 = np.zeros(len(x))
1393
- gauss2 = np.zeros(len(x))
1394
- lorentz3 = np.zeros(len(x))
1395
- lorentz = ((0.5 * p[0] * p[1] / 3.14) / ((x - p[2]) ** 2 + ((p[0] / 2) ** 2)))
1396
- lorentz2 = ((0.5 * p[3] * p[4] / 3.14) / ((x - (p[5])) ** 2 + ((p[3] / 2) ** 2)))
1397
- if len(p) > 6:
1398
- lorentz3 = (0.5 * p[6] * p[7] / 3.14) / ((x - p[8]) ** 2 + (p[6] / 2) ** 2)
1399
- gauss2 = p[10] * np.exp(-(x - p[11]) ** 2 / (2.0 * (p[9] / 2.3548) ** 2))
1400
- # ((0.5 * p[9]* p[10]/3.14)/((x- (p[11]))**2+(( p[9]/2)**2)))
1401
- y = (lorentz * lorentz2) + gauss1 + gauss2 + lorentz3
1402
-
1403
- return y
1404
-
1405
-
1406
- def drude2(tags, e, p):
1407
- """dielectric function according to Drude theory for fitting"""
1408
-
1409
- return drude(e, p[0], p[1], p[2], p[3])
1410
-
1411
-
1412
- def xsec_xrpa(energy_scale, e0, z, beta, shift=0):
1413
- """ Calculate momentum-integrated cross-section for EELS from X-ray photo-absorption cross-sections.
1414
-
1415
- X-ray photo-absorption cross-sections from NIST.
1416
- Momentum-integrated cross-section for EELS according to Egerton Ultramicroscopy 50 (1993) 13-28 equation (4)
1417
-
1418
- Parameters
1419
- ----------
1420
- energy_scale: numpy array
1421
- energy scale of spectrum to be analyzed
1422
- e0: float
1423
- acceleration voltage in keV
1424
- z: int
1425
- atomic number of element
1426
- beta: float
1427
- effective collection angle in mrad
1428
- shift: float
1429
- chemical shift of edge in eV
1430
- """
1431
- beta = beta * 0.001 # collection half angle theta [rad]
1432
- # theta_max = self.parent.spec[0].convAngle * 0.001 # collection half angle theta [rad]
1433
- dispersion = energy_scale[1] - energy_scale[0]
1434
-
1435
- x_sections = get_x_sections(z)
1436
- enexs = x_sections['ene']
1437
- datxs = x_sections['dat']
1438
-
1439
- # enexs = enexs[:len(datxs)]
1440
-
1441
- #####
1442
- # Cross Section according to Egerton Ultramicroscopy 50 (1993) 13-28 equation (4)
1443
- #####
1444
-
1445
- # Relativistic correction factors
1446
- t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06) ** 2) / 2.0
1447
- gamma = 1 + e0 / 511.06
1448
- a = 6.5 # e-14 *10**14
1449
- b = beta
1450
-
1451
- theta_e = enexs / (2 * gamma * t)
1452
-
1453
- g = 2 * np.log(gamma) - np.log((b ** 2 + theta_e ** 2) / (b ** 2 + theta_e ** 2 / gamma ** 2)) - (
1454
- gamma - 1) * b ** 2 / (b ** 2 + theta_e ** 2 / gamma ** 2)
1455
- datxs = datxs * (a / enexs / t) * (np.log(1 + b ** 2 / theta_e ** 2) + g) / 1e8
1456
-
1457
- datxs = datxs * dispersion # from per eV to per dispersion
1458
- coeff = splrep(enexs, datxs, s=0) # now in areal density atoms / m^2
1459
- xsec = np.zeros(len(energy_scale))
1460
- # shift = 0# int(ek -onsetXRPS)#/dispersion
1461
- lin = interp1d(enexs, datxs, kind='linear') # Linear instead of spline interpolation to avoid oscillations.
1462
- if energy_scale[0] < enexs[0]:
1463
- start = np.searchsorted(energy_scale, enexs[0])+1
1464
- else:
1465
- start = 0
1466
- xsec[start:] = lin(energy_scale[start:] - shift)
1467
-
1468
- return xsec
1469
-
1470
-
1471
- def drude_simulation(dset, e, ep, ew, tnm, eb):
1472
- """probabilities of dielectric function eps relative to zero-loss integral (i0 = 1)
1473
-
1474
- Gives probabilities of dielectric function eps relative to zero-loss integral (i0 = 1) per eV
1475
- Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
1476
-
1477
- # function drude(ep,ew,eb,epc,e0,beta,nn,tnm)
1478
- # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy(eb),
1479
- # this program generates:
1480
- # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
1481
- # single scattering from Eq. (4.26) and SRFINT from Eq. (4.31)
1482
- # The output is e, ssd into the file drude.ssd (for use in Flog etc.)
1483
- # and e,eps1 ,eps2 into drude.eps (for use in Kroeger etc.)
1484
- # Gives probabilities relative to zero-loss integral (i0 = 1) per eV
1485
- # Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
1486
- # Version 10.11.26
1487
-
1488
-
1489
- b.7 drude Simulation of a Low-Loss Spectrum
1490
- The program DRUDE calculates a single-scattering plasmon-loss spectrum for
1491
- a specimen of a given thickness tnm (in nm), recorded with electrons of a
1492
- specified incident energy e0 by a spectrometer that accepts scattering up to a
1493
- specified collection semi-angle beta. It is based on the extended drude model
1494
- (Section 3.3.2), with a volume energy-loss function elf in accord with Eq. (3.64) and
1495
- a surface-scattering energy-loss function srelf as in Eq. (4.31). Retardation effects
1496
- and coupling between the two surface modes are not included. The surface term can
1497
- be made negligible by entering a large specimen thickness (tnm > 1000).
1498
- Surface intensity srfint and volume intensity volint are calculated from
1499
- Eqs. (4.31) and (4.26), respectively. The total spectral intensity ssd is written to
1500
- the file DRUDE.SSD, which can be used as input for KRAKRO. These intensities are
1501
- all divided by i0, to give relative probabilities (per eV). The real and imaginary parts
1502
- of the dielectric function are written to DRUDE.EPS and can be used for comparison
1503
- with the results of Kramers–Kronig analysis (KRAKRO.DAT).
1504
- Written output includes the surface-loss probability Ps, obtained by integrating
1505
- srfint (a value that relates to two surfaces but includes the negative begrenzungs
1506
- term), for comparison with the analytical integration represented by Eq. (3.77). The
1507
- volume-loss probability p_v is obtained by integrating volint and is used to calculate
1508
- the volume plasmon mean free path (lam = tnm/p_v). The latter is listed and
1509
- compared with the MFP obtained from Eq. (3.44), which represents analytical integration
1510
- assuming a zero-width plasmon peak. The total probability (Pt = p_v+Ps) is
1511
- calculated and used to evaluate the thickness (lam.Pt) that would be given by the formula
1512
- t/λ = ln(It/i0), ignoring the surface-loss probability. Note that p_v will exceed
1513
- 1 for thicker specimens (t/λ > 1), since it represents the probability of plasmon
1514
- scattering relative to that of no inelastic scattering.
1515
- The command-line usage is drude(ep,ew,eb,epc,beta,e0,tnm,nn), where ep is the
1516
- plasmon energy, ew the plasmon width, eb the binding energy of the electrons (0 for
1517
- a metal), and nn is the number of channels in the output spectrum. An example of
1518
- the output is shown in Fig. b.1a,b.
1519
-
1520
- """
1521
-
1522
- epc = dset.energy_scale[1] - dset.energy_scale[0] # input('ev per channel : ');
1523
-
1524
- b = dset.metadata['collection_angle']/ 1000. # rad
1525
- epc = dset.energy_scale[1] - dset.energy_scale[0] # input('ev per channel : ');
1526
- e0 = dset.metadata['acceleration_voltage'] / 1000. # input('incident energy e0(kev) : ');
1527
-
1528
- # effective kinetic energy: T = m_o v^2/2,
1529
- t = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2 # eV # equ.5.2a or Appendix E p 427
1530
-
1531
- # 2 gamma T
1532
- tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0) # eV Appendix E p 427
1533
-
1534
- rk0 = 2590 * (1.0 + e0 / 511.06) * np.sqrt(2.0 * t / 511060)
1535
-
1536
- os = e[0]
1537
- ew_mod = eb
1538
- tags = dset.metadata
1539
-
1540
- eps = 1 - (ep ** 2 - ew_mod * e * 1j) / (e ** 2 + 2 * e * ew * 1j) # Mod drude term
1541
-
1542
- eps[np.nonzero(eps == 0.0)] = 1e-19
1543
- elf = np.imag(-1 / eps)
1544
-
1545
- the = e / tgt # varies with energy loss! # Appendix E p 427
1546
- # srfelf = 4..*eps2./((1+eps1).^2+eps2.^2) - elf; %equivalent
1547
- srfelf = np.imag(-4. / (1.0 + eps)) - elf # for 2 surfaces
1548
- angdep = np.arctan(b / the) / the - b / (b * b + the * the)
1549
- srfint = angdep * srfelf / (3.1416 * 0.05292 * rk0 * t) # probability per eV
1550
- anglog = np.log(1.0 + b * b / the / the)
1551
- i0 = dset.sum() # *tags['counts2e']
1552
-
1553
-
1554
- # 2 * t = m_0 v**2 !!! a_0 = 0.05292 nm
1555
- volint = abs(tnm / (np.pi * 0.05292 * t * 2.0) * elf * anglog) # S equ 4.26% probability per eV
1556
- volint = volint * i0 / epc # S probability per channel
1557
- ssd = volint # + srfint;
1558
-
1559
- if e[0] < -1.0:
1560
- xs = int(abs(-e[0] / epc))
1561
-
1562
- ssd[0:xs] = 0.0
1563
- volint[0:xs] = 0.0
1564
- srfint[0:xs] = 0.0
1565
-
1566
- # if os <0:
1567
- p_s = np.trapz(e, srfint) # 2 surfaces but includes negative Begrenzung contribution.
1568
- p_v = abs(np.trapz(e, abs(volint / tags['spec'].sum()))) # integrated volume probability
1569
- p_v = (volint / i0).sum() # our data have he same epc and the trapez formula does not include
1570
- lam = tnm / p_v # does NOT depend on free-electron approximation (no damping).
1571
- lamfe = 4.0 * 0.05292 * t / ep / np.log(1 + (b * tgt / ep) ** 2) # Eq.(3.44) approximation
1572
-
1573
- tags['eps'] = eps
1574
- tags['lam'] = lam
1575
- tags['lamfe'] = lamfe
1576
- tags['p_v'] = p_v
1577
-
1578
- return ssd # /np.pi
1579
-
1580
-
- def effective_collection_angle(energy_scale, alpha, beta, beam_kv):
-     """Calculates the effective collection angle in mrad.
-
-     Translated from the original Fortran program.
-
-     Parameters
-     ----------
-     energy_scale: numpy array
-         first and last energy loss of spectrum in eV
-     alpha: float
-         convergence angle in mrad
-     beta: float
-         collection angle in mrad
-     beam_kv: float
-         acceleration voltage in V
-
-     Returns
-     -------
-     eff_beta: float
-         effective collection angle in mrad
+     background_fit_end = energy_scale[-1]
+     for key in region_tags:
+         end = region_tags[key]['start_x'] + region_tags[key]['width_x']

-     # function y = effbeta(ene, alpha, beta, beam_kv)
-     #
-     # This program computes etha(alpha, beta), that is the collection
-     # efficiency associated with the following geometry:
-     #
-     # alpha = half angle of illumination (0 -> pi/2)
-     # beta = half angle of collection (0 -> pi/2)
-     #        (pi/2 = 1570.795 mrad)
-     #
-     # A constant angular distribution of incident electrons is assumed
-     # for any incident angle (-alpha, alpha). These electrons impinge on the
-     # target and a single energy-loss event occurs, with a characteristic
-     # angle theta-e (relativistic). The angular distribution of the
-     # electrons after the target is analytically derived.
-     # This program integrates this distribution from theta=0 up to
-     # theta=beta with an adjustable angular step.
-     # This program also computes beta*, which is the theoretical
-     # collection angle that would give the same value of etha(alpha, beta)
-     # with a parallel incident beam.
-     #
-     # subroutines and function subprograms required
-     # ---------------------------------------------
-     # none
-     #
-     # comments
-     # --------
-     #
-     # The following parameters are asked as input:
-     # accelerating voltage (kV), energy loss range (eV) for the study,
-     # energy loss step (eV) in this range, alpha (mrad), beta (mrad).
-     # The program returns for each energy loss step:
-     # alpha (mrad), beta (mrad), theta-e (relativistic) (mrad),
-     # energy loss (eV), etha (#), beta* (mrad)
-     #
-     # author:
-     # -------
-     # Pierre TREBBIA
-     # US 41: "Microscopie Electronique Analytique Quantitative"
-     # Laboratoire de Physique des Solides, Bat. 510
-     # Universite Paris-Sud, F91405 ORSAY Cedex
-     # Phone: (33-1) 69 41 53 68
-     #
-     """
-     if beam_kv == 0:
-         beam_kv = 100.0
+         startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
+         endx = np.searchsorted(energy_scale, end)

-     if alpha == 0:
-         return beta
+         if key == 'fit_area':
+             mask[0:startx] = 0.0
+             mask[endx:-1] = 0.0
+         else:
+             mask[startx:endx] = 0.0
+             if region_tags[key]['start_x'] < background_fit_end:  # which is the onset of the first edge?
+                 background_fit_end = region_tags[key]['start_x']
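The loop above assumes a region_tags dictionary that marks the background-fit window ('fit_area') and one exclusion window per edge on the energy axis. A minimal sketch of that layout (keys and numbers are illustrative only, not taken from the release):

region_tags = {'fit_area': {'start_x': 250., 'width_x': 500.},  # overall fit window in eV
               '1': {'start_x': 284., 'width_x': 40.}}          # window masked around an edge onset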
 
-     if beta == 0:
-         return alpha
+     ########################
+     # Background Fit
+     ########################
+     bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
+     background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
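power_law_background itself is outside this hunk; what follows is a least-squares sketch of the A * E^(-r) model it returns, assuming the call signature used above (a sketch, not the package implementation):

import numpy as np
from scipy.optimize import leastsq

def power_law_background_sketch(spectrum, energy_scale, fit_area):
    # fit A * E^-r inside the window [fit_area[0], fit_area[1]] (in eV)
    start, end = np.searchsorted(energy_scale, fit_area)
    def residuals(p, x, y):
        return y - p[0] * np.power(x, -p[1])
    [p, _] = leastsq(residuals, [1e3, 3.], args=(energy_scale[start:end],
                                                 np.array(spectrum)[start:end]))
    background = p[0] * np.power(energy_scale, -p[1])
    return background, [p[0], p[1]]  # A, r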
 
-     z1 = beam_kv  # acceleration voltage in V
-     z2 = energy_scale[0]
-     z3 = energy_scale[-1]
-     z4 = 100.0
+     #######################
+     # Edge Fit
+     #######################
+     x = energy_scale
+     blurred = gaussian_filter(spectrum, sigma=5)

-     z5 = alpha * 0.001  # rad
-     z6 = beta * 0.001  # rad
-     z7 = 500.0  # number of integration steps, to be modified at will
+     y = blurred  # now in probability
+     y[np.where(y < 1e-8)] = 1e-8

-     # main loop on energy loss
-     #
-     for zx in range(int(z2), int(z3), int(z4)):  # zx = current energy loss
-         eta = 0.0
-         x0 = float(zx) * (z1 + 511060.) / (z1 * (z1 + 1022120.))  # x0 = relativistic theta-e
-         x1 = np.pi / (2. * x0)
-         x2 = x0 * x0 + z5 * z5
-         x3 = z5 / x0 * z5 / x0
-         x4 = 0.1 * np.sqrt(x2)
-         dtheta = (z6 - x4) / z7
-         #
-         # calculation of the analytical expression
-         #
-         for zi in range(1, int(z7)):
-             theta = x4 + dtheta * float(zi)
-             x5 = theta * theta
-             x6 = 4. * x5 * x0 * x0
-             x7 = x2 - x5
-             x8 = np.sqrt(x7 * x7 + x6)
-             x9 = (x8 + x7) / (2. * x0 * x0)
-             x10 = 2. * theta * dtheta * np.log(x9)
-             eta = eta + x10
+     xsec = []
+     number_of_edges = 0
+     for key in edges:
+         if key.isdigit():
+             xsec.append(edges[key]['data'])
+             number_of_edges += 1
+     xsec = np.array(xsec)

-         eta = eta + x2 / 100. * np.log(1. + x3)  # addition of the central contribution
-         x4 = z5 * z5 * np.log(1. + x1 * x1)  # normalisation
-         eta = eta / x4
-         #
-         # correction by geometrical factor (beta/alpha)**2
-         #
-         if z6 < z5:
-             x5 = z5 / z6
-             eta = eta * x5 * x5
+     def model(xx, pp):
+         yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
+         for i in range(number_of_edges):
+             pp[i] = np.abs(pp[i])
+             yy = yy + pp[i] * xsec[i, :]
+         return yy

-         etha2 = eta * 100.
-         #
-         # calculation of beta*
-         #
-         x6 = np.power((1. + x1 * x1), eta)
-         x7 = x0 * np.sqrt(x6 - 1.)
-         beta = x7 * 1000.  # in mrad
+     def residuals(pp, xx, yy):
+         err = np.abs((yy - model(xx, pp)) * mask)  # / np.sqrt(np.abs(y))
+         return err

-     return beta
+     scale = y[100]
+     pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
+     [p, _] = leastsq(residuals, pin, args=(x, y))

+     for key in edges:
+         if key.isdigit():
+             edges[key]['areal_density'] = p[int(key) - 1]

- def kroeger_core(e_data, a_data, eps_data, ee, thick, relativistic=True):
-     """This function calculates the differential scattering probability
+     edges['model'] = {}
+     edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
+     edges['model']['background-poly_0'] = p[6]
+     edges['model']['background-poly_1'] = p[7]
+     edges['model']['background-poly_2'] = p[8]
+     edges['model']['background-A'] = A
+     edges['model']['background-r'] = r
+     edges['model']['spectrum'] = model(x, p)
+     edges['model']['blurred'] = blurred
+     edges['model']['mask'] = mask
+     edges['model']['fit_parameter'] = p
+     edges['model']['fit_area_start'] = region_tags['fit_area']['start_x']
+     edges['model']['fit_area_end'] = region_tags['fit_area']['start_x'] + region_tags['fit_area']['width_x']

-     .. math::
-        \\frac{d^2P}{d \\Omega dE}
-     of the low-loss region for total loss and volume plasmon loss
+     return edges
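After the fit, everything a caller needs sits in the returned edges dictionary; assuming the keys written above, the results could be read back like this (illustrative sketch):

for key in edges:
    if key.isdigit():
        print(f"edge {key}: areal density = {edges[key]['areal_density']:.3g}")
fit_spectrum = edges['model']['spectrum']   # full model: background plus weighted edges
background = edges['model']['background']   # power law plus polynomial correction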
 
-     Args:
-         e_data (array): energy scale [eV]
-         a_data (array): angle or momentum range [rad]
-         eps_data (array): dielectric function data
-         ee (float): acceleration voltage [keV]
-         thick (float): thickness in m
-         relativistic (boolean): include relativistic corrections
+
+
+ def get_spectrum(dataset, x=0, y=0, bin_x=1, bin_y=1):
+     """Get the spectrum at a (binned) position of a spectrum image.
+
+     Parameters
+     ----------
+     dataset: sidpy.Dataset object
+         contains spectrum or spectrum image
+     x: int, default = 0
+         x position in spectrum image
+     y: int, default = 0
+         y position in spectrum image
+     bin_x: int, default = 1
+         binning of spectrum image in x-direction
+     bin_y: int, default = 1
+         binning of spectrum image in y-direction

      Returns:
-         P (numpy array 2d): total loss probability
-         p_vol (numpy array 2d): volume loss probability
+     --------
+     spectrum: sidpy.Dataset object
+
      """
+     if dataset.data_type.name == 'SPECTRUM':
+         spectrum = dataset.copy()
+     else:
+         image_dims = dataset.get_image_dims()
+         if x > dataset.shape[image_dims[0]] - bin_x:
+             x = dataset.shape[image_dims[0]] - bin_x
+         if y > dataset.shape[image_dims[1]] - bin_y:
+             y = dataset.shape[image_dims[1]] - bin_y
+         selection = []
+         dimensions = dataset.get_dimension_types()
+         for dim, dimension_type in enumerate(dimensions):
+             # print(dim, axis.dimension_type)
+             if dimension_type == 'SPATIAL':
+                 if dim == image_dims[0]:
+                     selection.append(slice(x, x + bin_x))
+                 else:
+                     selection.append(slice(y, y + bin_y))
+             elif dimension_type == 'SPECTRAL':
+                 selection.append(slice(None))
+             elif dimension_type == 'CHANNEL':
+                 selection.append(slice(None))
+             else:
+                 selection.append(slice(0, 1))
+
+         spectrum = dataset[tuple(selection)].mean(axis=tuple(image_dims))
+     spectrum.squeeze().compute()
+     spectrum.data_type = 'Spectrum'
+     return spectrum
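A usage sketch for get_spectrum; si_dataset is a hypothetical sidpy spectrum image:

spectrum = get_spectrum(si_dataset, x=5, y=7, bin_x=2, bin_y=2)  # 2 x 2 pixel average at (5, 7)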
 
-     # $d^2P/(dE d\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im\left[\frac{t \mu^2}{\varepsilon \phi^2}\right]$
+ def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
+     """find peaks in spectrum"""

-     # ee = 200  # keV
-     # thick = 32.0  # nm
-     thick = thick * 1e-9  # input thickness now in m
-     # Define constants
-     # ec = 14.4;
-     m_0 = constants.value(u'electron mass')  # rest electron mass in kg
-     # h = constants.Planck  # Planck's constant
-     hbar = constants.hbar
+     if dataset.data_type.name == 'SPECTRAL_IMAGE':
+         spectrum = dataset.view.get_spectrum()
+     else:
+         spectrum = np.array(dataset)

-     c = constants.speed_of_light  # speed of light in m/s
-     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
-     e = constants.value(u'elementary charge')  # electron charge in Coulomb
-     print('hbar =', hbar, ' [Js] =', hbar / e, '[eV s]')
+     energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values

-     # Calculate fixed terms of equation
-     va = 1 - (511. / (511. + ee)) ** 2  # ee is incident energy in keV
-     v = c * np.sqrt(va)
-     beta = v / c  # non-relativistic for =1
+     second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
+     [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
+
+     start_channel = np.searchsorted(energy_scale, fit_start)
+     end_channel = np.searchsorted(energy_scale, fit_end)
+     peaks = []
+     for index in indices:
+         if start_channel < index < end_channel:
+             peaks.append(index - start_channel)
+
+     if 'model' in dataset.metadata:
+         model = dataset.metadata['model'][start_channel:end_channel]
+
+     elif energy_scale[0] > 0:
+         if 'edges' not in dataset.metadata:
+             return
+         if 'model' not in dataset.metadata['edges']:
+             return
+         model = dataset.metadata['edges']['model']['spectrum'][start_channel:end_channel]

-     if relativistic:
-         gamma = 1. / np.sqrt(1 - beta ** 2)
      else:
-         gamma = 1  # set = 1 to correspond to E+B & Siegle
+         model = np.zeros(end_channel - start_channel)

-     momentum = m_0 * v * gamma  # used for xya; E&B have no gamma
+     energy_scale = energy_scale[start_channel:end_channel]

-     # ##### Define mapped variables
+     difference = np.array(spectrum)[start_channel:end_channel] - model
+     fit = np.zeros(len(energy_scale))
+     p_out = []
+     if len(peaks) > 0:
+         p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
+         [p_out, _] = scipy.optimize.leastsq(residuals_smooth, p_in, ftol=1e-3,
+                                             args=(energy_scale, difference, False))
+         fit = fit + model_smooth(energy_scale, p_out, False)

-     # Define independent variables E, theta
-     a_data = np.array(a_data)
-     e_data = np.array(e_data)
-     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
-     # Define CONJUGATE dielectric function variable eps
-     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
+     peak_model = np.zeros(len(spectrum))
+     peak_model[start_channel:end_channel] = fit

-     # ##### Calculate lambda in equation EB 2.3
-     theta2 = theta ** 2 + 1e-15
-     theta_e = energy * e / momentum / v
-     theta_e2 = theta_e ** 2
+     return peak_model, p_out
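find_peaks leans on the module's second_derivative, residuals_smooth and model_smooth helpers (defined elsewhere in this file); a hedged usage sketch with assumed energy bounds:

peak_model, p = find_peaks(dataset, fit_start=1.5, fit_end=4.0, sensitivity=2)
# p holds one (position, amplitude, width) triple per fitted peak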
 
-     lambda2 = theta2 - eps * theta_e2 * beta ** 2  # Eq 2.3

-     lambd = np.sqrt(lambda2)
-     if (np.real(lambd) < 0).any():
-         print(' error: negative lambda')
+ def find_maxima(y, number_of_peaks):
+     """ find the most prominent peaks

-     # ##### Calculate lambda0 in equation EB 2.4
-     # According to Kröger, real(lambda0) is defined as positive!
+     peaks are then sorted by energy

-     phi2 = lambda2 + theta_e2  # Eq. 2.2
-     lambda02 = theta2 - theta_e2 * beta ** 2  # eta=1  Eq 2.4
-     lambda02[lambda02 < 0] = 0
-     lambda0 = np.sqrt(lambda02)
-     if not (np.real(lambda0) >= 0).any():
-         print(' error: negative lambda0')
+     Parameters
+     ----------
+     y: numpy array
+         (part of) spectrum
+     number_of_peaks: int
+         number of peaks to return

-     de = thick * energy * e / 2.0 / hbar / v  # Eq 2.5
+     Returns
+     -------
+     numpy array
+         indices of peaks
+     """
+     blurred2 = gaussian_filter(y, sigma=2)
+     peaks, _ = scipy.signal.find_peaks(blurred2)
+     prominences = peak_prominences(blurred2, peaks)[0]
+     prominences_sorted = np.argsort(prominences)
+     peaks = peaks[prominences_sorted[-number_of_peaks:]]

-     xya = lambd * de / theta_e  # used in Eqs 2.6, 2.7, 4.4
+     peak_indices = np.argsort(peaks)
+     return peaks[peak_indices]
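A self-contained example for find_maxima on synthetic data:

import numpy as np

y = np.sin(np.linspace(0, 20, 500)) + np.random.normal(0, 0.05, 500)
maxima = find_maxima(y, 3)  # indices of the 3 most prominent peaks, sorted by position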
 
-     lplus = lambda0 * eps + lambd * np.tanh(xya)  # eta=1  Eq 2.6
-     lminus = lambda0 * eps + lambd / np.tanh(xya)  # eta=1  Eq 2.7

-     mue2 = 1 - (eps * beta ** 2)  # Eq. 4.5
-     phi20 = lambda02 + theta_e2  # Eq 4.6
-     phi201 = theta2 + theta_e2 * (1 - (eps + 1) * beta ** 2)  # eta=1, eps-1 in E+B Eq.(4.7)
+ #
+ def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
+     """ model for fitting low-loss spectrum"""
+     if pin is None:
+         pin = p

-     # Eq 4.2
-     a1 = phi201 ** 2 / eps
-     a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
-     a = a1 * a2
+     # if len([restrict_pos]) == 1:
+     #     restrict_pos = [restrict_pos] * number_of_peaks
+     # if len([restrict_width]) == 1:
+     #     restrict_width = [restrict_width] * number_of_peaks
+     y = np.zeros(len(x))

-     # Eq 4.3
-     b1 = beta ** 2 * lambda0 * theta_e * phi201
-     b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
-     b = b1 * b2
+     for i in range(number_of_peaks):
+         index = int(i * 3)
+         if restrict_pos > 0:
+             if p[index] > pin[index] * (1.0 + restrict_pos):
+                 p[index] = pin[index] * (1.0 + restrict_pos)
+             if p[index] < pin[index] * (1.0 - restrict_pos):
+                 p[index] = pin[index] * (1.0 - restrict_pos)

-     # Eq 4.4
-     c1 = -beta ** 4 * lambda0 * lambd * theta_e2
-     c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
-     c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
-     c = c1 * (c2 + c3)
+         p[index + 1] = abs(p[index + 1])
+         # print(p[index + 1])
+         p[index + 2] = abs(p[index + 2])
+         if restrict_width > 0:
+             if p[index + 2] > pin[index + 2] * (1.0 + restrict_width):
+                 p[index + 2] = pin[index + 2] * (1.0 + restrict_width)
+
+         if peak_shape[i] == 'Lorentzian':
+             y = y + lorentz(x, p[index:])
+         elif peak_shape[i] == 'zl':

-     # Put all the pieces together...
-     p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
+             y = y + zl(x, p[index:], p_zl)
+         else:
+             y = y + gauss(x, p[index:])
+     return y
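model3 expects three parameters per peak, laid out as (position, amplitude, width), with the line shape of each peak chosen by peak_shape ('Gaussian', 'Lorentzian' or 'zl'); a sketch of a two-peak evaluation (x and all numbers are assumptions):

p = [1.8, 0.5, 0.4,   # peak 1: position (eV), amplitude, width
     3.1, 0.2, 0.6]   # peak 2
y_fit = model3(x, p, number_of_peaks=2, peak_shape=['Gaussian', 'Lorentzian'], p_zl=[])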
 
-     p_v = thick * mue2 / eps / phi2

-     p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2  # ASSUMES eta=1
-     p_s2 = hbar / momentum
-     p_s3 = a + b + c
+ def sort_peaks(p, peak_shape):
+     """sort fitting parameters by peak position"""
+     number_of_peaks = int(len(p) / 3)
+     p3 = np.reshape(p, (number_of_peaks, 3))
+     sort_pin = np.argsort(p3[:, 0])

-     p_s = p_s1 * p_s2 * p_s3
+     p = p3[sort_pin].flatten()
+     peak_shape = np.array(peak_shape)[sort_pin].tolist()

-     # print(p_v.min(), p_v.max(), p_s.min(), p_s.max())
-     # Calculate P and p_vol (volume only)
-     dtheta = a_data[1] - a_data[0]
-     scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
+     return p, peak_shape

-     p = p_coef * np.imag(p_v - p_s)  # Eq 4.1
-     p_vol = p_coef * np.imag(p_v) * scale

-     # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
-     # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
+ def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
+     """ add peaks to fitting parameters"""
+     if pin_in is None:
+         return
+     if peak_shape_in is None:
+         return

-     p_simple = p_coef * np.imag(1 / eps) * thick / (
-         theta2 + theta_e2) * scale  # Watch it: eps is the conjugated dielectric function
+     pin = pin_in.copy()

-     return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2  # , lplus_min, lminus_min
+     peak_shape = peak_shape_in.copy()
+     if isinstance(shape, str):  # if shape is only a string, make a list of it
+         shape = [shape]

+     if len(shape) == 1:
+         shape = shape * len(peaks)
+     for i, peak in enumerate(peaks):
+         pin.append(x[peak])
+         pin.append(y[peak])
+         pin.append(.3)
+         peak_shape.append(shape[i])

- def kroeger_core2(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True):
-     """This function calculates the differential scattering probability
+     return pin, peak_shape

-     .. math::
-        \\frac{d^2P}{d \\Omega dE}
-     of the low-loss region for total loss and volume plasmon loss

-     Args:
-         e_data (array): energy scale [eV]
-         a_data (array): angle or momentum range [rad]
-         eps_data (array): dielectric function
-         acceleration_voltage_kev (float): acceleration voltage [keV]
-         thickness (float): thickness in nm
-         relativistic (boolean): relativistic correction
+ def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
+     """fit model to low-loss spectrum"""

-     Returns:
-         P (numpy array 2d): total loss probability
-         p_vol (numpy array 2d): volume loss probability
+     pin_original = pin.copy()

-         return P, P*scale*1e2, p_vol*1e2, p_simple*1e2
-     """
+     def residuals3(pp, xx, yy):
+         err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
+                            restrict_width)) / np.sqrt(np.abs(yy))
+         return err

-     # $d^2P/(dE d\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im\left[\frac{t \mu^2}{\varepsilon \phi^2}\right]$
-     """
-     # Internally everything is calculated in SI units
-     # acceleration_voltage_kev = 200  # keV
-     # thick = 32.0e-9  # m
+     [p, _] = leastsq(residuals3, pin, args=(x, y))
+     # p2 = p.tolist()
+     # p3 = np.reshape(p2, (number_of_peaks, 3))
+     # sort_pin = np.argsort(p3[:, 0])

-     """
-     a_data = np.array(a_data)
-     e_data = np.array(e_data)
-     # adjust input to SI units
-     wavelength = get_wave_length(acceleration_voltage_kev * 1e3)  # in m
-     thickness = thickness * 1e-9  # input thickness now in m
+     # p = p3[sort_pin].flatten()
+     # peak_shape = np.array(peak_shape)[sort_pin].tolist()

-     # Define constants
-     # ec = 14.4;
-     m_0 = constants.value(u'electron mass')  # rest electron mass in kg
-     # h = constants.Planck  # Planck's constant
-     hbar = constants.hbar
+     return p, peak_shape
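Taken together, the three helpers form a small fitting pipeline: add_peaks seeds new (position, amplitude, width) triples, fit_model refines them, and sort_peaks orders the result. A hedged sketch, assuming x, y, pin and peak_shape come from an earlier fit:

pin, peak_shape = add_peaks(x, y, peaks=[120, 340], pin_in=pin, peak_shape_in=peak_shape)
p, peak_shape = fit_model(x, y, pin, len(peak_shape), peak_shape, p_zl=[],
                          restrict_pos=0.1, restrict_width=0.5)
p, peak_shape = sort_peaks(p, peak_shape)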
 
-     c = constants.speed_of_light  # speed of light in m/s
-     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
-     e = constants.value(u'elementary charge')  # electron charge in Coulomb
-     # print('hbar =', hbar, ' [Js] =', hbar / e, '[eV s]')

-     # Calculate fixed terms of equation
-     va = 1 - (511. / (511. + acceleration_voltage_kev)) ** 2  # acceleration_voltage_kev is incident energy in keV
-     v = c * np.sqrt(va)

-     if relativistic:
-         beta = v / c  # non-relativistic for =1
-         gamma = 1. / np.sqrt(1 - beta ** 2)
-     else:
-         beta = 1
-         gamma = 1  # set = 1 to correspond to E+B & Siegle
+ def plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, es=0, ibt=[]):
+     """Plot loss function """

-     momentum = m_0 * v * gamma  # used for xya; E&B have no gamma
+     [x, y] = np.meshgrid(e_data + 1e-12, a_data[1024:2048] * 1000)

-     # ##### Define mapped variables
+     z = plotdata
+     lev = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 3, 4, 4.9]) * max_p / 5

-     # Define independent variables E, theta
-     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
-     # Define CONJUGATE dielectric function variable eps
-     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
+     wavelength = get_wave_length(ee)
+     q = a_data[1024:2048] / (wavelength * 1e9)  # in [1/nm]
+     scale = np.array([0, a_data[-1], e_data[0], e_data[-1]])
+     ev2hertz = constants.value('electron volt-hertz relationship')

-     # ##### Calculate lambda in equation EB 2.3
-     theta2 = theta ** 2 + 1e-15
+     if units[0] == 'mrad':
+         units[0] = 'scattering angle [mrad]'
+         scale[1] = scale[1] * 1000.
+         light_line = constants.c * a_data  # for mrad
+     elif units[0] == '1/nm':
+         units[0] = 'scattering vector [1/nm]'
+         scale[1] = scale[1] / (wavelength * 1e9)
+         light_line = 1 / (constants.c / ev2hertz) * 1e-9

-     theta_e = energy * e / momentum / v  # critical angle
+     if units[1] == 'eV':
+         units[1] = 'energy loss [eV]'

-     lambda2 = theta2 - eps * theta_e ** 2 * beta ** 2  # Eq 2.3
+     if units[2] == 'ppm':
+         units[2] = 'probability [ppm]'
+     if units[2] == '1/eV':
+         units[2] = 'probability [eV$^{-1}$ srad$^{-1}$]'

-     lambd = np.sqrt(lambda2)
-     if (np.real(lambd) < 0).any():
-         print(' error negative lambda')
+     alpha = 3. / 5. * ef / ep

-     # ##### Calculate lambda0 in equation EB 2.4
-     # According to Kröger real(lambda0) is defined as positive!
+     ax2 = plt.gca()
+     fig2 = plt.gcf()
+     im = ax2.imshow(z.T, clim=(0, max_p), origin='lower', aspect='auto', extent=scale)
+     co = ax2.contour(y, x, z, levels=lev, colors='k', origin='lower')
+     # ,extent=(-ang*1000., ang*1000., e_data[0], e_data[-1]))  # , vmin=p_vol.min(), vmax=1000

-     phi2 = lambda2 + theta_e ** 2  # Eq. 2.2
-     lambda02 = theta2 - theta_e ** 2 * beta ** 2  # eta=1  Eq 2.4
-     lambda02[lambda02 < 0] = 0
-     lambda0 = np.sqrt(lambda02)
-     if not (np.real(lambda0) >= 0).any():
-         print(' error negative lambda0')
+     fig2.colorbar(im, ax=ax2, label=units[2])

-     de = thickness * energy * e / (2.0 * hbar * v)  # Eq 2.5
-     xya = lambd * de / theta_e  # used in Eqs 2.6, 2.7, 4.4
+     ax2.plot(a_data, light_line, c='r', label='light line')
+     # ax2.plot(e_data*light_line*np.sqrt(np.real(eps_data)), e_data, color='steelblue',
+     #          label='$\omega = c q \sqrt{\epsilon_2}$')

-     lplus = lambda0 * eps + lambd * np.tanh(xya)  # eta=1  Eq 2.6
-     lminus = lambda0 * eps + lambd / np.tanh(xya)  # eta=1  Eq 2.7
+     # ax2.plot(q, Ep_disp, c='r')
+     ax2.plot([11.5 * light_line, 0.12], [11.5, 11.5], c='r')

-     mue2 = 1 - (eps * beta ** 2)  # Eq. 4.5
-     phi20 = lambda02 + theta_e ** 2  # Eq 4.6
-     phi201 = theta2 + theta_e ** 2 * (1 - (eps + 1) * beta ** 2)  # eta=1, eps-1 in E+B Eq.(4.7)
+     ax2.text(.05, 11.7, 'surface plasmon', color='r')
+     ax2.plot([0.0, 0.12], [16.8, 16.8], c='r')
+     ax2.text(.05, 17, 'volume plasmon', color='r')
+     ax2.set_xlim(0, scale[1])
+     ax2.set_ylim(0, 20)
+     # Interband transitions
+     ax2.plot([0.0, 0.25], [4.2, 4.2], c='g', label='interband transitions')
+     ax2.plot([0.0, 0.25], [5.2, 5.2], c='g')
+     ax2.set_ylabel(units[1])
+     ax2.set_xlabel(units[0])
+     ax2.legend(loc='lower right')
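A hedged call sketch for plot_dispersion; plotdata would be an (angle, energy) probability map such as the output of a Kroeger-type calculation (all values illustrative):

# a_data must cover at least 2048 angular channels (the function slices a_data[1024:2048])
plot_dispersion(plotdata, ['mrad', 'eV', '1/eV'], a_data, e_data, 'dispersion',
                max_p=plotdata.max(), ee=200000.)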
 
-     # Eq 4.2
-     a1 = phi201 ** 2 / eps
-     a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
-     a = a1 * a2

-     # Eq 4.3
-     b1 = beta ** 2 * lambda0 * theta_e * phi201
-     b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
-     b = b1 * b2
+ def xsec_xrpa(energy_scale, e0, z, beta, shift=0):
+     """ Calculate momentum-integrated cross-section for EELS from X-ray photo-absorption cross-sections.

-     # Eq 4.4
-     c1 = -beta ** 4 * lambda0 * lambd * theta_e ** 2
-     c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
-     c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
-     c = c1 * (c2 + c3)
+     X-ray photo-absorption cross-sections from NIST.
+     Momentum-integrated cross-section for EELS according to Egerton, Ultramicroscopy 50 (1993) 13-28, equation (4).

-     # Put all the pieces together...
-     p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
+     Parameters
+     ----------
+     energy_scale: numpy array
+         energy scale of spectrum to be analyzed
+     e0: float
+         acceleration voltage in keV
+     z: int
+         atomic number of element
+     beta: float
+         effective collection angle in mrad
+     shift: float
+         chemical shift of edge in eV
+     """
+     beta = beta * 0.001  # collection half angle theta [rad]
+     # theta_max = self.parent.spec[0].convAngle * 0.001  # collection half angle theta [rad]
+     dispersion = energy_scale[1] - energy_scale[0]

-     p_v = thickness * mue2 / eps / phi2
+     x_sections = get_x_sections(z)
+     enexs = x_sections['ene']
+     datxs = x_sections['dat']

-     p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2  # ASSUMES eta=1
-     p_s2 = hbar / momentum
-     p_s3 = a + b + c
+     # enexs = enexs[:len(datxs)]

-     p_s = p_s1 * p_s2 * p_s3
+     #####
+     # Cross section according to Egerton, Ultramicroscopy 50 (1993) 13-28, equation (4)
+     #####

-     # print(p_v.min(), p_v.max(), p_s.min(), p_s.max())
-     # Calculate P and p_vol (volume only)
-     dtheta = a_data[1] - a_data[0]
-     scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
+     # Relativistic correction factors
+     t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06) ** 2) / 2.0
+     gamma = 1 + e0 / 511.06
+     a = 6.5  # 6.5e-14 * 10**14
+     b = beta

-     p = p_coef * np.imag(p_v - p_s)  # Eq 4.1
-     p_vol = p_coef * np.imag(p_v) * scale
+     theta_e = enexs / (2 * gamma * t)

-     # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
-     # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
+     g = 2 * np.log(gamma) - np.log((b ** 2 + theta_e ** 2) / (b ** 2 + theta_e ** 2 / gamma ** 2)) - (
+         gamma - 1) * b ** 2 / (b ** 2 + theta_e ** 2 / gamma ** 2)
+     datxs = datxs * (a / enexs / t) * (np.log(1 + b ** 2 / theta_e ** 2) + g) / 1e8

-     p_simple = p_coef * np.imag(1 / eps) * thickness / (
-         theta2 + theta_e2) * scale  # Watch it: eps is the conjugated dielectric function
+     datxs = datxs * dispersion  # from per eV to per dispersion
+     coeff = splrep(enexs, datxs, s=0)  # now in areal density atoms / m^2
+     xsec = np.zeros(len(energy_scale))
+     # shift = 0  # int(ek - onsetXRPS)  # / dispersion
+     lin = interp1d(enexs, datxs, kind='linear')  # linear instead of spline interpolation to avoid oscillations
+     if energy_scale[0] < enexs[0]:
+         start = np.searchsorted(energy_scale, enexs[0]) + 1
+     else:
+         start = 0
+     xsec[start:] = lin(energy_scale[start:] - shift)

-     return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2  # , lplus_min, lminus_min
+     return xsec
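A usage sketch for xsec_xrpa; the energy axis is an assumption:

import numpy as np

energy_scale = np.linspace(200., 800., 2048)                 # eV
sigma_c_k = xsec_xrpa(energy_scale, e0=200., z=6, beta=30.)  # carbon K edge at 200 kV, beta = 30 mrad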
 


  ##########################
@@ -2100,4 +2043,4 @@ def get_spectrum_eels_db(formula=None, edge=None, title=None, element=None):
      print(parameters['TITLE'])
      print(f'found {len(reference_spectra.keys())} spectra in EELS database')

-     return reference_spectra
+     return reference_spectra