pyTEMlib-0.2020.11.1-py3-none-any.whl → pyTEMlib-0.2024.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of pyTEMlib might be problematic.

Files changed (60)
  1. pyTEMlib/__init__.py +11 -11
  2. pyTEMlib/animation.py +631 -0
  3. pyTEMlib/atom_tools.py +240 -245
  4. pyTEMlib/config_dir.py +57 -33
  5. pyTEMlib/core_loss_widget.py +658 -0
  6. pyTEMlib/crystal_tools.py +1255 -0
  7. pyTEMlib/diffraction_plot.py +756 -0
  8. pyTEMlib/dynamic_scattering.py +293 -0
  9. pyTEMlib/eds_tools.py +609 -0
  10. pyTEMlib/eels_dialog.py +749 -491
  11. pyTEMlib/{interactive_eels.py → eels_dialog_utilities.py} +1199 -1177
  12. pyTEMlib/eels_tools.py +2031 -1698
  13. pyTEMlib/file_tools.py +1276 -560
  14. pyTEMlib/file_tools_qt.py +193 -0
  15. pyTEMlib/graph_tools.py +1166 -450
  16. pyTEMlib/graph_viz.py +449 -0
  17. pyTEMlib/image_dialog.py +158 -0
  18. pyTEMlib/image_dlg.py +146 -232
  19. pyTEMlib/image_tools.py +1399 -1028
  20. pyTEMlib/info_widget.py +933 -0
  21. pyTEMlib/interactive_image.py +1 -226
  22. pyTEMlib/kinematic_scattering.py +1196 -0
  23. pyTEMlib/low_loss_widget.py +176 -0
  24. pyTEMlib/microscope.py +61 -81
  25. pyTEMlib/peak_dialog.py +1047 -410
  26. pyTEMlib/peak_dlg.py +286 -242
  27. pyTEMlib/probe_tools.py +653 -207
  28. pyTEMlib/sidpy_tools.py +153 -136
  29. pyTEMlib/simulation_tools.py +104 -87
  30. pyTEMlib/version.py +6 -3
  31. pyTEMlib/xrpa_x_sections.py +20972 -0
  32. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/LICENSE +21 -21
  33. pyTEMlib-0.2024.9.0.dist-info/METADATA +92 -0
  34. pyTEMlib-0.2024.9.0.dist-info/RECORD +37 -0
  35. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/WHEEL +5 -5
  36. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/entry_points.txt +0 -1
  37. pyTEMlib/KinsCat.py +0 -2758
  38. pyTEMlib/__version__.py +0 -2
  39. pyTEMlib/data/TEMlibrc +0 -68
  40. pyTEMlib/data/edges_db.csv +0 -189
  41. pyTEMlib/data/edges_db.pkl +0 -0
  42. pyTEMlib/data/fparam.txt +0 -103
  43. pyTEMlib/data/microscopes.csv +0 -7
  44. pyTEMlib/data/microscopes.xml +0 -167
  45. pyTEMlib/data/path.txt +0 -1
  46. pyTEMlib/defaults_parser.py +0 -90
  47. pyTEMlib/dm3_reader.py +0 -613
  48. pyTEMlib/edges_db.py +0 -76
  49. pyTEMlib/eels_dlg.py +0 -224
  50. pyTEMlib/hdf_utils.py +0 -483
  51. pyTEMlib/image_tools1.py +0 -2194
  52. pyTEMlib/info_dialog.py +0 -237
  53. pyTEMlib/info_dlg.py +0 -202
  54. pyTEMlib/nion_reader.py +0 -297
  55. pyTEMlib/nsi_reader.py +0 -170
  56. pyTEMlib/structure_tools.py +0 -316
  57. pyTEMlib/test.py +0 -2072
  58. pyTEMlib-0.2020.11.1.dist-info/METADATA +0 -20
  59. pyTEMlib-0.2020.11.1.dist-info/RECORD +0 -45
  60. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/top_level.txt +0 -0
pyTEMlib/eels_tools.py CHANGED
@@ -1,1698 +1,2031 @@
- """
- All tools for processing and quantifying EELS datasets
- """
- import numpy as np
-
- import scipy
- from scipy.interpolate import interp1d, splrep  # splev, splint
- from scipy import interpolate
- from scipy.signal import peak_prominences
- from scipy.ndimage.filters import gaussian_filter
-
- from scipy import constants
- import matplotlib.pyplot as plt
- # import matplotlib.patches as patches
-
- # from matplotlib.widgets import SpanSelector
- # import ipywidgets as widgets
- # from IPython.display import display
-
- import requests
-
- from scipy.optimize import leastsq  # least-squares fitting routine of scipy
-
- import pickle  # pkg_resources,
-
- # We also use the image tool library of Quantifit
- import pyTEMlib.file_tools as ft
- from pyTEMlib.config_dir import data_path
-
- major_edges = ['K1', 'L3', 'M5', 'N5']
- all_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',
-              'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']
- first_close_edges = ['K1', 'L3', 'M5', 'M3', 'N5', 'N3']
-
- elements = [' ', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
-             'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V',
-             'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
-             'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag',
-             'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr',
-             'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
-             'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']
-
-
- # kroeger_core(e_data, a_data, eps_data, ee, thick, relativistic=True)
- # kroeger_core2(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True)
- # get_wave_length(e0)
-
- # plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, Es=0, IBT=[])
- # drude(tags, e, ep, ew, tnm, eb)
- # drude(ep, eb, gamma, e)
- # drude_lorentz(epsInf, leng, ep, eb, gamma, e, Amplitude)
- # zl_func(p, x)
-
- ################################################################
- # Read Functions
- ################################################################
-
-
- def read_dm3_eels_info(original_metadata):
-     if 'DM' not in original_metadata:
-         return {}
-     main_image = original_metadata['DM']['chosen_image']
-     exp_dictionary = original_metadata['ImageList'][str(main_image)]['ImageTags']
-     experiment = {}
-     if 'EELS' in exp_dictionary:
-         if 'Acquisition' in exp_dictionary['EELS']:
-             for key, item in exp_dictionary['EELS']['Acquisition'].items():
-                 if 'Exposure' in key:
-                     _, units = key.split('(')
-                     if units[:-1] == 's':
-                         experiment['single_exposure_time'] = item
-                 if 'Integration' in key:
-                     _, units = key.split('(')
-                     if units[:-1] == 's':
-                         experiment['exposure_time'] = item
-                 if 'frames' in key:
-                     experiment['number_of_frames'] = item
-         if 'Experimental Conditions' in exp_dictionary['EELS']:
-             for key, item in exp_dictionary['EELS']['Experimental Conditions'].items():
-                 if 'Convergence' in key:
-                     experiment['convergence_angle'] = item
-                 if 'Collection' in key:
-                     # print(item)
-                     # for val in item.values():
-                     experiment['collection_angle'] = item
-     if 'Microscope Info' in exp_dictionary:
-         # print(exp_dictionary['Microscope Info'].keys())
-         if 'Voltage' in exp_dictionary['Microscope Info']:
-             experiment['acceleration_voltage'] = exp_dictionary['Microscope Info']['Voltage']
-         if 'Name' in exp_dictionary['Microscope Info']:
-             experiment['microscope'] = exp_dictionary['Microscope Info']['Name']
-     return experiment
-
-
- def set_previous_quantification(current_dataset):
-     current_channel = current_dataset.h5_dataset.parent
-     found_metadata = False
-     for key in current_channel:
-         if 'Log' in key:
-             if current_channel[key]['analysis'][()] == 'EELS_quantification':
-                 current_dataset.metadata.update(ft.nest_dict(current_channel[key].attrs))
-                 found_metadata = True
-                 print('found previous quantification')
-
-     if not found_metadata:
-         # set the important experimental parameters
-         current_dataset.metadata['experiment'] = read_dm3_eels_info(current_dataset.original_metadata)
-
-         if 'experiment' not in current_dataset.metadata:
-             current_dataset.metadata['experiment'] = {}
-         if 'convergence_angle' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['convergence_angle'] = 30
-         if 'collection_angle' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['collection_angle'] = 50
-         if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
-             current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
-
-
- ################################################################
- # Peak Fit Functions
- ################################################################
-
-
- def residuals_smooth(p, x, y, only_positive_intensity):
-     err = (y - model_smooth(x, p, only_positive_intensity))
-     return err
-
-
- def model_smooth(x, p, only_positive_intensity=False):
-     y = np.zeros(len(x))
-
-     number_of_peaks = int(len(p) / 3)
-     for i in range(number_of_peaks):
-         if only_positive_intensity:
-             p[i * 3 + 1] = abs(p[i * 3 + 1])
-         p[i * 3 + 2] = abs(p[i * 3 + 2])
-         if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
-             p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2.  # width cannot extend beyond zero; maximum is FWTM/2
-
-         y = y + gauss(x, p[i * 3:])
-
-     return y
-
-
- def residuals_ll(p, x, y, only_positive_intensity):
-     err = (y - model_ll(x, p, only_positive_intensity)) / np.sqrt(np.abs(y))
-     return err
-
-
- def residuals_ll2(p, x, y, only_positive_intensity):
-     err = (y - model_ll(x, p, only_positive_intensity))
-     return err
-
-
- def model_ll(x, p, only_positive_intensity):
-     y = np.zeros(len(x))
-
-     number_of_peaks = int(len(p) / 3)
-     for i in range(number_of_peaks):
-         if only_positive_intensity:
-             p[i * 3 + 1] = abs(p[i * 3 + 1])
-         p[i * 3 + 2] = abs(p[i * 3 + 2])
-         if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
-             p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2.  # width cannot extend beyond zero; maximum is FWTM/2
-
-         y = y + gauss(x, p[i * 3:])
-
-     return y
-
-
- def fit_peaks(spectrum, energy_scale, pin, start_fit, end_fit, only_positive_intensity=False):
-     # TODO: remove zero_loss_fit_width, add absolute
-
-     fit_energy = energy_scale[start_fit:end_fit]
-     fit_spectrum = spectrum[start_fit:end_fit]
-
-     pin_flat = [item for sublist in pin for item in sublist]
-     [p_out, _] = leastsq(residuals_ll, np.array(pin_flat), ftol=1e-3,
-                          args=(fit_energy, fit_spectrum, only_positive_intensity))
-
-     p = []
-     for i in range(len(pin)):
-         if only_positive_intensity:
-             p_out[i * 3 + 1] = abs(p_out[i * 3 + 1])
-         p.append([p_out[i * 3], p_out[i * 3 + 1], abs(p_out[i * 3 + 2])])
-     return p
-
-
- #################################################################
- # CORE-LOSS functions
- #################################################################
-
-
- def get_x_sections(z=0):
-     """
-     Reads X-ray fluorescence cross-sections from a pickle file.
-
-     Input: nothing or an atomic number
-     Output: dictionary
-         of one element, or of all elements if z = 0
-     """
-     pkl_file = open(data_path + '/edges_db.pkl', 'rb')
-     x_sections = pickle.load(pkl_file)
-     pkl_file.close()
-
-     z = int(z)
-
-     if z < 1:
-         return x_sections
-     else:
-         z = str(z)
-         if z in x_sections:
-             return x_sections[z]
-         else:
-             return 0
-
-
- def get_z(z):
-     """
-     Returns the atomic number, whether the input is a string or a number.
-
-     Input:
-         z: atomic number or chemical symbol (0 if not valid)
-     """
-     x_sections = get_x_sections()
-
-     z_out = 0
-     if str(z).isdigit():
-         z_out = int(z)
-     elif isinstance(z, str):
-         for key in x_sections:
-             if x_sections[key]['name'].lower() == z.lower():  # one really should know how to spell the element
-                 z_out = int(key)
-     return z_out
-
-
- def list_all_edges(z):
-     element = str(z)
-     x_sections = get_x_sections()
-     print('Major edges')
-     for key in all_edges:
-         if key in x_sections[element]:
-             if 'onset' in x_sections[element][key]:
-                 print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
-
-
- def find_major_edges(edge_onset, maximal_chemical_shift=5):
-     text = ''
-     x_sections = get_x_sections()
-     for element in x_sections:
-         for key in x_sections[element]:
-
-             # if isinstance(x_sections[element][key], dict):
-             if key in major_edges:
-
-                 if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
-                     # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
-                     text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
-                                   f"{x_sections[element][key]['onset']:8.1f} eV "
-
-     return text
-
-
- def find_all_edges(edge_onset, maximal_chemical_shift=5):
-     text = ''
-     x_sections = get_x_sections()
-     for element in x_sections:
-         for key in x_sections[element]:
-
-             if isinstance(x_sections[element][key], dict):
-                 if 'onset' in x_sections[element][key]:
-                     if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
-                         # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
-                         text = text + f"\n {x_sections[element]['name']:2s}-{key}: " \
-                                       f"{x_sections[element][key]['onset']:8.1f} eV "
-     return text
-
-
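The lookup helpers above are deliberately string-based: `find_major_edges` and `find_all_edges` return a newline-separated text block of candidate edges near a given onset energy, which `find_edges` below parses back apart. A minimal usage sketch (assuming pyTEMlib 0.2020.11.1 and its packaged edges_db.pkl are installed; 532 eV targets the O-K onset region):

    import pyTEMlib.eels_tools as eels

    print(eels.get_z('Si'))                                    # 14; symbol or number both work
    print(eels.find_all_edges(532, maximal_chemical_shift=5))  # tabulated edges within 5 eV of 532 eV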
- def second_derivative(dataset, sensitivity):
-     dim = ft.get_dimensions_by_type('spectral', dataset)
-     energy_scale = np.array(dim[0][1])
-     if dataset.data_type.name == 'SPECTRAL_IMAGE':
-         spectrum = dataset.view.get_spectrum()
-     else:
-         spectrum = np.array(dataset)
-
-     spec = scipy.ndimage.gaussian_filter(spectrum, 3)
-
-     dispersion = ft.get_slope(energy_scale)
-     second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
-     second_dif[:3] = 0
-     second_dif[-3:] = 0
-
-     # find out whether there is a strong edge at the high end of the energy scale
-     noise_level = 2. * np.std(second_dif[3:50])
-     [indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
-     width = 50 / dispersion
-     if width < 50:
-         width = 50
-     start_end_noise = int(len(energy_scale) - width)
-     for index in indices[::-1]:
-         if index > start_end_noise:
-             start_end_noise = index - 70
-
-     noise_level_start = sensitivity * np.std(second_dif[3:50])
-     noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
-     slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
-     noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
-     return second_dif, noise_level
-
-
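`second_derivative` estimates curvature with a symmetric difference of the smoothed spectrum shifted by ±3 channels. A self-contained sketch of just that kernel on a synthetic edge (no pyTEMlib required; step position and widths are arbitrary):

    import numpy as np
    from scipy.ndimage import gaussian_filter

    energy = np.linspace(0, 100, 1000)                     # hypothetical axis, 0.1 eV per channel
    spectrum = 1.0 / (1.0 + np.exp(-(energy - 50) / 0.5))  # smooth step standing in for an edge
    spec = gaussian_filter(spectrum, 3)
    second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, 3)
    second_dif[:3] = second_dif[-3:] = 0                   # np.roll wraps around; zero those channels
    print(energy[np.argmax(second_dif)])                   # maximum curvature sits just below the 50 eV step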
- def find_edges(dataset, sensitivity=2.5):
-     dim = ft.get_dimensions_by_type('spectral', dataset)
-     energy_scale = np.array(dim[0][1])
-
-     second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
-
-     [indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
-
-     peaks['peak_positions'] = energy_scale[indices]
-     peaks['peak_indices'] = indices
-     edge_energies = [energy_scale[50]]
-     edge_indices = []
-
-     [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
-     minima = energy_scale[indices]
-
-     for peak_number in range(len(peaks['peak_positions'])):
-         position = peaks['peak_positions'][peak_number]
-         if position - edge_energies[-1] > 20:
-             impossible = minima[minima < position]
-             impossible = impossible[impossible > position - 5]
-             if len(impossible) == 0:
-                 possible = minima[minima > position]
-                 possible = possible[possible < position + 5]
-                 if len(possible) > 0:
-                     edge_energies.append((position + possible[0]) / 2)
-                     edge_indices.append(np.searchsorted(energy_scale, (position + possible[0]) / 2))
-
-     selected_edges = []
-     for peak in edge_indices:
-         if 525 < energy_scale[peak] < 533:
-             selected_edges.append('O-K1')
-         else:
-             selected_edge = ''
-             edges = find_major_edges(energy_scale[peak], 20)
-             edges = edges.split('\n')
-             minimum_dist = 100.
-             for edge in edges[1:]:
-                 edge = edge[:-3].split(':')
-                 name = edge[0].strip()
-                 energy = float(edge[1].strip())
-                 if np.abs(energy - energy_scale[peak]) < minimum_dist:
-                     minimum_dist = np.abs(energy - energy_scale[peak])
-                     selected_edge = name
-
-             if selected_edge != '':
-                 selected_edges.append(selected_edge)
-
-     return selected_edges
-
-
- def make_edges(edges_present, energy_scale, e_0, coll_angle):
-     """
-     Makes the edges dictionary
-     """
-     x_sections = get_x_sections()
-     edges = {}
-     for i, edge in enumerate(edges_present):
-         element, symmetry = edge.split('-')
-         z = 0
-         for key in x_sections:
-             if element == x_sections[key]['name']:
-                 z = int(key)
-         edges[i] = {}
-         edges[i]['z'] = z
-         edges[i]['symmetry'] = symmetry
-         edges[i]['element'] = element
-
-     for key in edges:
-         xsec = x_sections[str(edges[key]['z'])]
-         if 'chemical_shift' not in edges[key]:
-             edges[key]['chemical_shift'] = 0
-         if 'symmetry' not in edges[key]:
-             edges[key]['symmetry'] = 'K1'
-         if 'K' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'K1'
-         elif 'L' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'L3'
-         elif 'M' in edges[key]['symmetry']:
-             edges[key]['symmetry'] = 'M5'
-         else:
-             edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
-
-         edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
-         edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-         edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
-         edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
-
-     edges = make_cross_sections(edges, energy_scale, e_0, coll_angle)
-
-     return edges
-
-
- def make_cross_sections(edges, energy_scale, e_0, coll_angle):
-     """
-     Updates the edges dictionary with collection-angle-integrated X-ray photoabsorption cross-sections
-     """
-     for key in edges:
-         if key.isdigit():
-             edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
-                                            edges[key]['chemical_shift']) / 1e10  # from barns to 1/nm^2
-             edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
-             edges[key]['X_section_type'] = 'XRPA'
-             edges[key]['X_section_source'] = 'pyTEMlib'
-
-     return edges
-
-
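Schematically, the edges dictionary that comes out of `make_edges`/`make_cross_sections` is keyed by edge index, and each entry carries the bookkeeping the fit routines below rely on (hypothetical numbers for an O-K edge; 'data' additionally holds the per-channel cross-section array):

    edges = {0: {'z': 8, 'element': 'O', 'symmetry': 'K1',
                 'original_onset': 532.0, 'chemical_shift': 0,
                 'onset': 532.0,               # original_onset + chemical_shift
                 'start_exclude': 527.0,       # onset - 'excl before' from the cross-section table
                 'end_exclude': 562.0,         # onset + 'excl after'
                 'X_section_type': 'XRPA', 'X_section_source': 'pyTEMlib'}}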
- def power_law(energy, a, r):
-     """
-     power law: a * energy**(-r)
-     """
-     return a * np.power(energy, -r)
-
-
- def power_law_background(spectrum, energy_scale, fit_area, verbose=False):
-     # Determine energy window for background fit in pixels
-
-     startx = np.searchsorted(energy_scale, fit_area[0])
-     endx = np.searchsorted(energy_scale, fit_area[1])
-
-     x = np.array(energy_scale)[startx:endx]
-
-     y = np.array(spectrum)[startx:endx].flatten()
-
-     # Initial values of parameters
-     p0 = np.array([1.0E+20, 3])
-
-     # background fitting
-     def bgdfit(pp, yy, xx):
-         err = yy - power_law(xx, pp[0], pp[1])
-         return err
-
-     [p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
-
-     background_difference = y - power_law(x, p[0], p[1])
-     background_noise_level = std_dev = np.std(background_difference)
-     if verbose:
-         print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
-         print(background_difference.max() / background_noise_level)
-
-         print(f'Noise level in spectrum {std_dev:.3f} counts')
-
-     # Calculate background over the whole energy scale
-     background = power_law(energy_scale, p[0], p[1])
-     return background, p
-
-
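`power_law_background` is the workhorse of the core-loss fit; a minimal self-contained check of the same least-squares approach on synthetic data (all values arbitrary):

    import numpy as np
    from scipy.optimize import leastsq

    energy = np.linspace(200.0, 800.0, 1200)
    true_a, true_r = 1.0e8, 3.0
    spectrum = true_a * energy ** (-true_r) + np.random.normal(0, 0.01, energy.size)

    def residuals(p, y, x):
        return y - p[0] * x ** (-p[1])

    p, _ = leastsq(residuals, np.array([1.0e7, 2.5]), args=(spectrum, energy), maxfev=2000)
    print(p)   # should recover roughly [1e8, 3.0]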
- def cl_model(x, p, number_of_edges, xsec):
-     y = (p[9] * np.power(x, (-p[10]))) + p[7] * x + p[8] * x * x
-     for i in range(number_of_edges):
-         y = y + p[i] * xsec[i, :]
-     return y
-
-
- def fit_edges2(spectrum, energy_scale, edges):
-     dispersion = energy_scale[1] - energy_scale[0]
-     # Determine fitting ranges and masks to exclude ranges
-     mask = np.ones(len(spectrum))
-
-     background_fit_start = edges['fit_area']['fit_start']
-     if edges['fit_area']['fit_end'] > energy_scale[-1]:
-         edges['fit_area']['fit_end'] = energy_scale[-1]
-     background_fit_end = edges['fit_area']['fit_end']
-
-     startx = np.searchsorted(energy_scale, background_fit_start)
-     endx = np.searchsorted(energy_scale, background_fit_end)
-     mask[0:startx] = 0.0
-     mask[endx:-1] = 0.0
-     for key in edges:
-         if key.isdigit():
-             if edges[key]['start_exclude'] > background_fit_start + dispersion:
-                 if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
-                     if edges[key]['end_exclude'] > background_fit_end - dispersion:
-                         # we need at least one channel to fit.
-                         edges[key]['end_exclude'] = background_fit_end - dispersion
-                     startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
-                     if startx < 2:
-                         startx = 1
-                     endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
-                     mask[startx: endx] = 0.0
-
-     ########################
-     # Background Fit
-     ########################
-     bgd_fit_area = [background_fit_start, background_fit_end]
-     background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
-
-     #######################
-     # Edge Fit
-     #######################
-     x = energy_scale
-     blurred = gaussian_filter(spectrum, sigma=5)
-
-     y = blurred  # now in probability
-     y[np.where(y < 1e-8)] = 1e-8
-
-     xsec = []
-     number_of_edges = 0
-     for key in edges:
-         if key.isdigit():
-             xsec.append(edges[key]['data'])
-             number_of_edges += 1
-     xsec = np.array(xsec)
-
-     def model(xx, pp):
-         yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
-         for i in range(number_of_edges):
-             pp[i] = np.abs(pp[i])
-             yy = yy + pp[i] * xsec[i, :]
-         return yy
-
-     def residuals(pp, xx, yy):
-         err = np.abs((yy - model(xx, pp)) * mask)  # / np.sqrt(np.abs(y))
-         return err
-
-     scale = y[100]
-     pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
-     [p, _] = leastsq(residuals, pin, args=(x, y))
-
-     for key in edges:
-         if key.isdigit():
-             edges[key]['areal_density'] = p[int(key)]
-
-     edges['model'] = {}
-     edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
-     edges['model']['background-poly_0'] = p[6]
-     edges['model']['background-poly_1'] = p[7]
-     edges['model']['background-poly_2'] = p[8]
-     edges['model']['background-A'] = A
-     edges['model']['background-r'] = r
-     edges['model']['spectrum'] = model(x, p)
-     edges['model']['blurred'] = blurred
-     edges['model']['mask'] = mask
-     edges['model']['fit_parameter'] = p
-     edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
-     edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
-
-     return edges
-
-
- def fit_edges(spectrum, energy_scale, region_tags, edges):
-     # Determine fitting ranges and masks to exclude ranges
-     mask = np.ones(len(spectrum))
-
-     background_fit_end = energy_scale[-1]
-     for key in region_tags:
-         end = region_tags[key]['start_x'] + region_tags[key]['width_x']
-
-         startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
-         endx = np.searchsorted(energy_scale, end)
-
-         if key == 'fit_area':
-             mask[0:startx] = 0.0
-             mask[endx:-1] = 0.0
-         else:
-             mask[startx:endx] = 0.0
-             if region_tags[key]['start_x'] < background_fit_end:  # which is the onset of the first edge?
-                 background_fit_end = region_tags[key]['start_x']
-
-     ########################
-     # Background Fit
-     ########################
-     bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
-     background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
-
-     #######################
-     # Edge Fit
-     #######################
-     x = energy_scale
-     blurred = gaussian_filter(spectrum, sigma=5)
-
-     y = blurred  # now in probability
-     y[np.where(y < 1e-8)] = 1e-8
-
-     xsec = []
-     number_of_edges = 0
-     for key in edges:
-         if key.isdigit():
-             xsec.append(edges[key]['data'])
-             number_of_edges += 1
-     xsec = np.array(xsec)
-
-     def model(xx, pp):
-         yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
-         for i in range(number_of_edges):
-             pp[i] = np.abs(pp[i])
-             yy = yy + pp[i] * xsec[i, :]
-         return yy
-
-     def residuals(pp, xx, yy):
-         err = np.abs((yy - model(xx, pp)) * mask)  # / np.sqrt(np.abs(y))
-         return err
-
-     scale = y[100]
-     pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
-     [p, _] = leastsq(residuals, pin, args=(x, y))
-
-     for key in edges:
-         if key.isdigit():
-             edges[key]['areal_density'] = p[int(key) - 1]
-
-     edges['model'] = {}
-     edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
-     edges['model']['background-poly_0'] = p[6]
-     edges['model']['background-poly_1'] = p[7]
-     edges['model']['background-poly_2'] = p[8]
-     edges['model']['background-A'] = A
-     edges['model']['background-r'] = r
-     edges['model']['spectrum'] = model(x, p)
-     edges['model']['blurred'] = blurred
-     edges['model']['mask'] = mask
-     edges['model']['fit_parameter'] = p
-     edges['model']['fit_area_start'] = region_tags['fit_area']['start_x']
-     edges['model']['fit_area_end'] = region_tags['fit_area']['start_x'] + region_tags['fit_area']['width_x']
-
-     return edges
-
-
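Both `fit_edges2` and `fit_edges` fit the same model: the fixed power-law background plus a residual second-order polynomial plus one scaling factor per edge cross-section. Stripped of bookkeeping, the model is (sketch; `xsec` is the stack of per-edge cross-sections assembled above, `p` the fit vector):

    def edge_model(x, p, background, xsec):
        # p[i] scales edge i; p[6:9] is the residual background polynomial
        y = background + p[6] + p[7] * x + p[8] * x * x
        for i, edge_xsec in enumerate(xsec):
            y = y + abs(p[i]) * edge_xsec
        return y

The fitted per-edge scaling factors are what both routines store as 'areal_density'.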
- def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
-     if dataset.data_type.name == 'SPECTRAL_IMAGE':
-         spectrum = dataset.view.get_spectrum()
-     else:
-         spectrum = np.array(dataset)
-
-     spec_dim = ft.get_dimensions_by_type('SPECTRAL', dataset)[0]
-     energy_scale = np.array(spec_dim[1])
-
-     second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
-     [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
-
-     start_channel = np.searchsorted(energy_scale, fit_start)
-     end_channel = np.searchsorted(energy_scale, fit_end)
-     peaks = []
-     for index in indices:
-         if start_channel < index < end_channel:
-             peaks.append(index - start_channel)
-
-     if energy_scale[0] > 0:
-         if 'edges' not in dataset.metadata:
-             return
-         if 'model' not in dataset.metadata['edges']:
-             return
-         model = dataset.metadata['edges']['model']['spectrum'][start_channel:end_channel]
-
-     else:
-         model = np.zeros(end_channel - start_channel)
-
-     energy_scale = energy_scale[start_channel:end_channel]
-
-     difference = np.array(spectrum)[start_channel:end_channel] - model
-     fit = np.zeros(len(energy_scale))
-     p_out = []
-     if len(peaks) > 0:
-         p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
-         [p_out, _] = scipy.optimize.leastsq(residuals_smooth, p_in, ftol=1e-3,
-                                             args=(energy_scale, difference, False))
-         fit = fit + model_smooth(energy_scale, p_out, False)
-
-     peak_model = np.zeros(len(spec_dim[1]))
-     peak_model[start_channel:end_channel] = fit
-
-     return peak_model, p_out
-
-
- def find_maxima(y, number_of_peaks):
-     """
-     Finds the number_of_peaks most prominent peaks;
-     the peaks are then sorted by energy.
-
-     Input:
-         y: array of (part of) a spectrum
-         number_of_peaks: int
-     Output:
-         array of indices of peaks
-     """
-     blurred2 = gaussian_filter(y, sigma=2)
-     peaks, _ = scipy.signal.find_peaks(blurred2)
-     prominences = peak_prominences(blurred2, peaks)[0]
-     prominences_sorted = np.argsort(prominences)
-     peaks = peaks[prominences_sorted[-number_of_peaks:]]
-
-     peak_indices = np.argsort(peaks)
-     return peaks[peak_indices]
-
-
- def gauss(x, p):  # p[0]==mean, p[1]==amplitude, p[2]==fwhm
-     """
-     area = np.sqrt(2 * np.pi) * p[1] * np.abs(p[2] / 2.3548)
-     FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
-     sigma = FWHM / 2.3548
-     """
-     if p[2] == 0:
-         return x * 0.
-     else:
-         return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
-
-
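The constant 2.3548 used throughout is the Gaussian FWHM-to-sigma conversion, FWHM = 2*sqrt(2*ln 2)*sigma, i.e. sigma = FWHM/2.3548; a quick numerical check of the docstring's claim:

    import numpy as np

    fwhm = 1.0
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))   # = fwhm / 2.3548...
    x = np.array([0.0, fwhm / 2.0])
    y = np.exp(-x ** 2 / (2.0 * sigma ** 2))
    print(y[1] / y[0])                                  # 0.5: half maximum at x = FWHM/2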
- def lorentz(x, p):
-     lorentz_peak = 0.5 * p[2] / np.pi / ((x - p[0]) ** 2 + (p[2] / 2) ** 2)
-     return p[1] * lorentz_peak / lorentz_peak.max()
-
-
- def zl(x, p, p_zl):
-     p_zl_local = p_zl.copy()
-     p_zl_local[2] += p[0]
-     p_zl_local[5] += p[0]
-     zero_loss = zl_func(p_zl_local, x)
-     return p[1] * zero_loss / zero_loss.max()
-
-
- def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
-     if pin is None:
-         pin = p
-
-     # if len([restrict_pos]) == 1:
-     #     restrict_pos = [restrict_pos]*number_of_peaks
-     # if len([restrict_width]) == 1:
-     #     restrict_width = [restrict_width]*number_of_peaks
-     y = np.zeros(len(x))
-
-     for i in range(number_of_peaks):
-         index = int(i * 3)
-         if restrict_pos > 0:
-             if p[index] > pin[index] * (1.0 + restrict_pos):
-                 p[index] = pin[index] * (1.0 + restrict_pos)
-             if p[index] < pin[index] * (1.0 - restrict_pos):
-                 p[index] = pin[index] * (1.0 - restrict_pos)
-
-         p[index + 1] = abs(p[index + 1])
-         # print(p[index + 1])
-         p[index + 2] = abs(p[index + 2])
-         if restrict_width > 0:
-             if p[index + 2] > pin[index + 2] * (1.0 + restrict_width):
-                 p[index + 2] = pin[index + 2] * (1.0 + restrict_width)
-
-         if peak_shape[i] == 'Lorentzian':
-             y = y + lorentz(x, p[index:])
-         elif peak_shape[i] == 'zl':
-             y = y + zl(x, p[index:], p_zl)
-         else:
-             y = y + gauss(x, p[index:])
-     return y
-
-
- def sort_peaks(p, peak_shape):
-     number_of_peaks = int(len(p) / 3)
-     p3 = np.reshape(p, (number_of_peaks, 3))
-     sort_pin = np.argsort(p3[:, 0])
-
-     p = p3[sort_pin].flatten()
-     peak_shape = np.array(peak_shape)[sort_pin].tolist()
-
-     return p, peak_shape
-
-
- def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
-     if pin_in is None:
-         return
-     if peak_shape_in is None:
-         return
-
-     pin = pin_in.copy()
-
-     peak_shape = peak_shape_in.copy()
-     if isinstance(shape, str):  # if shape is only a string, make a list of it
-         shape = [shape]
-
-     if len(shape) == 1:
-         shape = shape * len(peaks)
-     for i, peak in enumerate(peaks):
-         pin.append(x[peak])
-         pin.append(y[peak])
-         pin.append(.3)
-         peak_shape.append(shape[i])
-
-     return pin, peak_shape
-
-
- def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
-     pin_original = pin.copy()
-
-     def residuals3(pp, xx, yy):
-         err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
-                            restrict_width)) / np.sqrt(np.abs(yy))
-         return err
-
-     [p, _] = leastsq(residuals3, pin, args=(x, y))
-     # p2 = p.tolist()
-     # p3 = np.reshape(p2, (number_of_peaks, 3))
-     # sort_pin = np.argsort(p3[:, 0])
-
-     # p = p3[sort_pin].flatten()
-     # peak_shape = np.array(peak_shape)[sort_pin].tolist()
-
-     return p, peak_shape
-
-
- def fix_energy_scale(spec, energy):
-     # determine start and end of the fitting region in pixels
-     start = np.searchsorted(energy, -10)
-     end = np.searchsorted(energy, 10)
-     startx = np.argmax(spec[start:end]) + start
-
-     end = startx + 3
-     start = startx - 3
-     for i in range(10):
-         if spec[startx - i] < 0.3 * spec[startx]:
-             start = startx - i
-         if spec[startx + i] < 0.3 * spec[startx]:
-             end = startx + i
-     if end - start < 3:
-         end = startx + 2
-         start = startx - 2
-
-     x = np.array(energy[start:end])
-     y = np.array(spec[start:end]).copy()
-
-     y[np.nonzero(y <= 0)] = 1e-12
-
-     p0 = [energy[startx], 1000.0, (energy[end] - energy[start]) / 3.]  # initial guess is a normal distribution
-
-     def errfunc(pp, xx, yy):
-         return (gauss(xx, pp) - yy) / np.sqrt(yy)  # distance to the target function
-
-     [p1, _] = leastsq(errfunc, np.array(p0[:]), args=(x, y))
-     fit_mu, amplitude, fwhm = p1
-
-     return fwhm, fit_mu
-
-
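`fix_energy_scale` recenters the energy axis by fitting a Gaussian to the channels around the zero-loss maximum and returning its width and position. A sketch of the intended round trip on a synthetic, slightly mis-calibrated ZLP (assuming the 0.2020.11.1 module layout shown here):

    import numpy as np
    import pyTEMlib.eels_tools as eels

    energy = np.linspace(-5, 45, 2048)                         # axis is off by +0.3 eV
    zlp = 1e4 * np.exp(-(energy - 0.3) ** 2 / (2 * 0.1 ** 2))  # zero-loss peak, sigma = 0.1 eV
    fwhm, shift = eels.fix_energy_scale(zlp, energy)
    energy_corrected = energy - shift                          # ZLP now sits at 0 eV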
- def resolution_function(energy_scale, spectrum, width, verbose=False):
-     guess = [0.2, 1000, 0.02, 0.2, 1000, 0.2]
-     p0 = np.array(guess)
-
-     start = np.searchsorted(energy_scale, -width / 2.)
-     end = np.searchsorted(energy_scale, width / 2.)
-     x = energy_scale[start:end]
-     y = spectrum[start:end]
-
-     def zl2(pp, yy, xx):
-         eerr = (yy - zl_func(pp, xx))  # /np.sqrt(y)
-         return eerr
-
-     def zl_restrict(pp, yy, xx):
-
-         if pp[2] > xx[-1] * .8:
-             pp[2] = xx[-1] * .8
-         if pp[2] < xx[0] * .8:
-             pp[2] = xx[0] * .8
-
-         if pp[5] > xx[-1] * .8:
-             pp[5] = xx[-1] * .8
-         if pp[5] < x[0] * .8:
-             pp[5] = xx[0] * .8
-
-         if len(pp) > 6:
-             pp[7] = abs(pp[7])
-             if abs(pp[7]) > (pp[1] + pp[4]) / 10:
-                 pp[7] = abs(pp[1] + pp[4]) / 10
-             if abs(pp[8]) > 1:
-                 pp[8] = pp[8] / abs(pp[8])
-             pp[6] = abs(pp[6])
-             pp[9] = abs(pp[9])
-
-         pp[0] = abs(pp[0])
-         pp[3] = abs(pp[3])
-         if pp[0] > (xx[-1] - xx[0]) / 2.0:
-             pp[0] = xx[-1] - xx[0] / 2.0
-         if pp[3] > (xx[-1] - xx[0]) / 2.0:
-             pp[3] = xx[-1] - xx[0] / 2.0
-
-         yy[yy < 0] = 0.  # no negative numbers in sqrt below
-         eerr = (yy - zl_func(pp, xx)) / np.sqrt(yy)
-
-         return eerr
-
-     [p_zl, _] = leastsq(zl2, p0, args=(y, x), maxfev=2000)
-     if verbose:
-         print('Fit of a Product of two Lorentzians')
-         print('Positions: ', p_zl[2], p_zl[5], 'Distance: ', p_zl[2] - p_zl[5])
-         print('Width: ', p_zl[0], p_zl[3])
-         print('Areas: ', p_zl[1], p_zl[4])
-         err = (y - zl_func(p_zl, x)) / np.sqrt(y)
-         print(f'Goodness of Fit: {sum(err ** 2) / len(y) / sum(y) * 1e2:.5}%')
-
-     z_loss = zl_func(p_zl, energy_scale)
-
-     return z_loss, p_zl
-
-
- def get_energy_shifts(spectrum_image, energy_scale, zero_loss_fit_width):
-     shifts = np.zeros(spectrum_image.shape[0:2])
-     for x in range(spectrum_image.shape[0]):
-         for y in range(spectrum_image.shape[1]):
-             spectrum = spectrum_image[x, y, :]
-             fwhm, delta_e = fix_energy_scale(spectrum, energy_scale)
-             z_loss, p_zl = resolution_function(energy_scale - delta_e, spectrum, zero_loss_fit_width)
-             fwhm2, delta_e2 = fix_energy_scale(z_loss, energy_scale - delta_e)
-             shifts[x, y] = delta_e + delta_e2
-     return shifts
-
-
- def shift_on_same_scale(spectrum_image, shift, energy_scale, master_energy_scale):
-     new_si = np.zeros(spectrum_image.shape)
-     for x in range(spectrum_image.shape[0]):
-         for y in range(spectrum_image.shape[1]):
-             tck = interpolate.splrep(energy_scale - shift[x, y], spectrum_image[x, y, :], k=1, s=0)
-             new_si[x, y, :] = interpolate.splev(master_energy_scale, tck, der=0)
-     return new_si
-
-
- def get_wave_length(e0):
-     ev = constants.e * e0
-     return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
-
-
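`get_wave_length` takes the acceleration voltage in volts (note `ev = constants.e * e0`) and returns the relativistic de Broglie wavelength in meters; a sanity check against the textbook value at 200 kV:

    import numpy as np
    from scipy import constants

    e0 = 200000.0    # volts
    ev = constants.e * e0
    wavelength = constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
    print(wavelength * 1e12)   # about 2.51 pm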
- def drude(ep, eb, gamma, e):
-     eps = 1 - (ep ** 2 - eb * e * 1j) / (e ** 2 + 2 * e * gamma * 1j)  # Mod drude term
-     return eps
-
-
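The dielectric function from `drude` is usually converted to the energy-loss function Im(-1/eps), which peaks near the plasmon energy; a short check with aluminium-like numbers (ep = 15 eV; damping value hypothetical):

    import numpy as np

    e = np.linspace(1.0, 40.0, 400)    # energy-loss axis in eV
    ep, eb, gamma = 15.0, 0.0, 1.0     # plasmon energy, binding energy, damping
    eps = 1 - (ep ** 2 - eb * e * 1j) / (e ** 2 + 2 * e * gamma * 1j)
    elf = (-1 / eps).imag              # energy-loss function Im(-1/eps)
    print(e[np.argmax(elf)])           # within a channel or two of ep = 15 eV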
- def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
-     eps = eps_inf
-     for i in range(leng):
-         eps = eps + amplitude[i] * (1 / (e + ep[i] + gamma[i] * 1j) - 1 / (e - ep[i] + gamma[i] * 1j))
-     return eps
-
-
- def plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, es=0, ibt=[]):
-     [x, y] = np.meshgrid(e_data + 1e-12, a_data[1024:2048] * 1000)
-
-     z = plotdata
-     lev = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 3, 4, 4.9]) * max_p / 5
-
-     wavelength = get_wave_length(ee)
-     q = a_data[1024:2048] / (wavelength * 1e9)  # in [1/nm]
-     scale = np.array([0, a_data[-1], e_data[0], e_data[-1]])
-     ev2hertz = constants.value('electron volt-hertz relationship')
-
-     if units[0] == 'mrad':
-         units[0] = 'scattering angle [mrad]'
-         scale[1] = scale[1] * 1000.
-         light_line = constants.c * a_data  # for mrad
-     elif units[0] == '1/nm':
-         units[0] = 'scattering vector [1/nm]'
-         scale[1] = scale[1] / (wavelength * 1e9)
-         light_line = 1 / (constants.c / ev2hertz) * 1e-9
-
-     if units[1] == 'eV':
-         units[1] = 'energy loss [eV]'
-
-     if units[2] == 'ppm':
-         units[2] = 'probability [ppm]'
-     if units[2] == '1/eV':
-         units[2] = 'probability [eV$^{-1}$ srad$^{-1}$]'
-
-     alpha = 3. / 5. * ef / ep
-
-     ax2 = plt.gca()
-     fig2 = plt.gcf()
-     im = ax2.imshow(z.T, clim=(0, max_p), origin='lower', aspect='auto', extent=scale)
-     co = ax2.contour(y, x, z, levels=lev, colors='k', origin='lower')
-     # , extent=(-ang*1000., ang*1000., e_data[0], e_data[-1]))  # , vmin=p_vol.min(), vmax=1000)
-
-     fig2.colorbar(im, ax=ax2, label=units[2])
-
-     ax2.plot(a_data, light_line, c='r', label='light line')
-     # ax2.plot(e_data*light_line*np.sqrt(np.real(eps_data)), e_data, color='steelblue',
-     #          label='$\omega = c q \sqrt{\epsilon_2}$')
-
-     # ax2.plot(q, Ep_disp, c='r')
-     ax2.plot([11.5 * light_line, 0.12], [11.5, 11.5], c='r')
-
-     ax2.text(.05, 11.7, 'surface plasmon', color='r')
-     ax2.plot([0.0, 0.12], [16.8, 16.8], c='r')
-     ax2.text(.05, 17, 'volume plasmon', color='r')
-     ax2.set_xlim(0, scale[1])
-     ax2.set_ylim(0, 20)
-     # Interband transitions
-     ax2.plot([0.0, 0.25], [4.2, 4.2], c='g', label='interband transitions')
-     ax2.plot([0.0, 0.25], [5.2, 5.2], c='g')
-     ax2.set_ylabel(units[1])
-     ax2.set_xlabel(units[0])
-     ax2.legend(loc='lower right')
-
-
- def zl_func(p, x):
-     p[0] = abs(p[0])
-
-     gauss1 = np.zeros(len(x))
-     gauss2 = np.zeros(len(x))
-     lorentz3 = np.zeros(len(x))
-     lorentz = ((0.5 * p[0] * p[1] / 3.14) / ((x - p[2]) ** 2 + ((p[0] / 2) ** 2)))
-     lorentz2 = ((0.5 * p[3] * p[4] / 3.14) / ((x - (p[5])) ** 2 + ((p[3] / 2) ** 2)))
-     if len(p) > 6:
-         lorentz3 = (0.5 * p[6] * p[7] / 3.14) / ((x - p[8]) ** 2 + (p[6] / 2) ** 2)
-         gauss2 = p[10] * np.exp(-(x - p[11]) ** 2 / (2.0 * (p[9] / 2.3548) ** 2))
-         # ((0.5 * p[9] * p[10] / 3.14) / ((x - (p[11])) ** 2 + ((p[9] / 2) ** 2)))
-     y = (lorentz * lorentz2) + gauss1 + gauss2 + lorentz3
-
-     return y
-
-
- def drude2(tags, e, p):
-     return drude(p[0], p[1], p[2], e)  # drude(ep, eb, gamma, e)
-
-
- def xsec_xrpa(energy_scale, e0, z, beta, shift=0):
-     """
-     Calculate the momentum-integrated cross-section for EELS from X-ray photoabsorption cross-sections.
-
-     Input:
-     ------
-     energy_scale: energy scale of the spectrum to be analyzed
-     e0: acceleration voltage in keV
-     z: atomic number of the element
-     beta: effective collection angle in mrad
-     shift: chemical shift of the edge in eV
-     """
-     beta = beta * 0.001  # collection half angle theta [rad]
-     # thetamax = self.parent.spec[0].convAngle * 0.001  # collection half angle theta [rad]
-     dispersion = energy_scale[1] - energy_scale[0]
-
-     x_sections = get_x_sections(z)
-     enexs = x_sections['ene']
-     datxs = x_sections['dat']
-
-     #####
-     # Cross-section according to Egerton, Ultramicroscopy 50 (1993) 13-28, equation (4)
-     #####
-
-     # Relativistic correction factors
-     t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06) ** 2) / 2.0
-     gamma = 1 + e0 / 511.06
-     a = 6.5  # e-14 * 10**14
-     b = beta
-
-     theta_e = enexs / (2 * gamma * t)
-
-     g = 2 * np.log(gamma) - np.log((b ** 2 + theta_e ** 2) / (b ** 2 + theta_e ** 2 / gamma ** 2)) - (
-         gamma - 1) * b ** 2 / (b ** 2 + theta_e ** 2 / gamma ** 2)
-     datxs = datxs * (a / enexs / t) * (np.log(1 + b ** 2 / theta_e ** 2) + g) / 1e8
-
-     datxs = datxs * dispersion  # from per eV to per dispersion
-     coeff = splrep(enexs, datxs, s=0)  # now in areal density atoms / m^2
-     xsec = np.zeros(len(energy_scale))
-     # shift = 0  # int(ek - onsetXRPS)  # /dispersion
-     lin = interp1d(enexs, datxs, kind='linear')  # linear instead of spline interpolation to avoid oscillations
-     xsec = lin(energy_scale - shift)
-
-     return xsec
-
-
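A sketch of how `xsec_xrpa` might be called for a silicon K edge (hypothetical window; requires the packaged edges_db.pkl cross-section data):

    import numpy as np
    import pyTEMlib.eels_tools as eels

    energy_scale = np.arange(1700.0, 2300.0, 1.0)         # eV, spanning the Si-K onset near 1839 eV
    xsec = eels.xsec_xrpa(energy_scale, 200.0, 14, 30.0)  # 200 keV, z = 14, beta = 30 mrad
    # make_cross_sections divides this by 1e10 to get its 1/nm^2 convention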
- def drude_simulation(dset, e, ep, ew, tnm, eb):
-     """
-     Gives probabilities of the dielectric function eps relative to the zero-loss integral (i0 = 1) per eV.
-     Details in R.F. Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011.
-
-     # function drude(ep, ew, eb, epc, e0, beta, nn, tnm)
-     # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy (eb),
-     # this program generates:
-     # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
-     # single scattering from Eq. (4.26) and SRFINT from Eq. (4.31)
-     # The output is e, ssd into the file drude.ssd (for use in Flog etc.)
-     # and e, eps1, eps2 into drude.eps (for use in Kroeger etc.)
-     # Gives probabilities relative to the zero-loss integral (i0 = 1) per eV
-     # Details in R.F. Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
-     # Version 10.11.26
-
-     B.7 Drude Simulation of a Low-Loss Spectrum
-     The program DRUDE calculates a single-scattering plasmon-loss spectrum for
-     a specimen of a given thickness tnm (in nm), recorded with electrons of a
-     specified incident energy e0 by a spectrometer that accepts scattering up to a
-     specified collection semi-angle beta. It is based on the extended Drude model
-     (Section 3.3.2), with a volume energy-loss function elf in accord with Eq. (3.64) and
-     a surface-scattering energy-loss function srelf as in Eq. (4.31). Retardation effects
-     and coupling between the two surface modes are not included. The surface term can
-     be made negligible by entering a large specimen thickness (tnm > 1000).
-     Surface intensity srfint and volume intensity volint are calculated from
-     Eqs. (4.31) and (4.26), respectively. The total spectral intensity ssd is written to
-     the file DRUDE.SSD, which can be used as input for KRAKRO. These intensities are
-     all divided by i0, to give relative probabilities (per eV). The real and imaginary parts
-     of the dielectric function are written to DRUDE.EPS and can be used for comparison
-     with the results of Kramers-Kronig analysis (KRAKRO.DAT).
-     Written output includes the surface-loss probability Ps, obtained by integrating
-     srfint (a value that relates to two surfaces but includes the negative begrenzungs
-     term), for comparison with the analytical integration represented by Eq. (3.77). The
-     volume-loss probability p_v is obtained by integrating volint and is used to calculate
-     the volume plasmon mean free path (lam = tnm/p_v). The latter is listed and
-     compared with the MFP obtained from Eq. (3.44), which represents analytical integration
-     assuming a zero-width plasmon peak. The total probability (Pt = p_v + Ps) is
-     calculated and used to evaluate the thickness (lam.Pt) that would be given by the formula
-     t/lambda = ln(It/i0), ignoring the surface-loss probability. Note that p_v will exceed
-     1 for thicker specimens (t/lambda > 1), since it represents the probability of plasmon
-     scattering relative to that of no inelastic scattering.
-     The command-line usage is drude(ep, ew, eb, epc, beta, e0, tnm, nn), where ep is the
-     plasmon energy, ew the plasmon width, eb the binding energy of the electrons (0 for
-     a metal), and nn is the number of channels in the output spectrum. An example of
-     the output is shown in Fig. B.1a, b.
-     """
-     epc = dset.energy_scale[1] - dset.energy_scale[0]  # input('eV per channel : ')
-     e0 = dset.metadata['acceleration_voltage'] / 1000.  # input('incident energy e0 (keV) : ')
-     # tnm = input('thickness (nm) : ')
-
-     b = dset.metadata['collection_angle']  # rad
-
-     t = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2  # eV  # Eq. 5.2a or Appendix E p. 427
-     tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0)  # eV  Appendix E p. 427
-     rk0 = 2590 * (1.0 + e0 / 511.06) * np.sqrt(2.0 * t / 511060)
-     os = e[0]
-     ew_mod = eb
-     tags = dset.metadata
-     # eps = 1 - ep**2/(e**2 - eb**2 + 2*e*ew*1j)  # Eq. 3.64
-     # eps = 1 - ep**2/(e**2 + 2*e*ew*1j)  # Eq. 3.64
-     # eps = 1 - (ep**2)/(e**2 + e*ew*1j - ep**2)  # Lorentz term
-     eps = 1 - (ep ** 2 - ew_mod * e * 1j) / (e ** 2 + 2 * e * ew * 1j)  # Mod drude term
-     # eps1 = np.real(eps)
-     # eps2 = np.imag(eps)
-     # eps1 = 1. - ep.^2./(e.^2+ew.^2)  # Eq. 3.40
-     # eps2 = ew.*ep.^2./e./(e.^2+ew.^2)  # Eq. 3.40
-     # elf = ep**2*e*ew/((e**2-ep**2)**2+(e*ew)**2)  # Eq. 3.40?
-     eps[np.nonzero(eps == 0.0)] = 1e-19
-     elf = np.imag(-1 / eps)
-
-     the = e / tgt  # varies with energy loss!  Appendix E p. 427
-     # srfelf = 4..*eps2./((1+eps1).^2+eps2.^2) - elf  # equivalent
-     srfelf = np.imag(-4. / (1.0 + eps)) - elf  # for 2 surfaces
-     angdep = np.arctan(b / the) / the - b / (b * b + the * the)
-     srfint = angdep * srfelf / (3.1416 * 0.05292 * rk0 * t)  # probability per eV
-     anglog = np.log(1.0 + b * b / the / the)
-     i0 = tags['spec'].sum()  # *tags['counts2e']
-     # print('counts2e', 1/tags['counts2e'])
-
-     # 2 * t = m_0 v**2 !!!  a_0 = 0.05292 nm
-     volint = abs(tnm / (np.pi * 0.05292 * t * 2.0) * elf * anglog)  # Eq. 4.26, probability per eV
-     volint = volint * i0 / epc  # probability per channel
-     ssd = volint  # + srfint
-
-     if os < -1.0:
-         xs = int(abs(-os / epc))
-
-         ssd[0:xs] = 0.0
-         volint[0:xs] = 0.0
-         srfint[0:xs] = 0.0
-
-     # if os < 0:
-     p_s = np.trapz(e, srfint)  # 2 surfaces, but includes the negative begrenzung contribution
-     p_v = abs(np.trapz(e, abs(volint / tags['spec'].sum())))  # integrated volume probability
-     p_v = (volint / i0).sum()  # our data have the same epc, and the trapezoid formula does not include it
-     lam = tnm / p_v  # does NOT depend on the free-electron approximation (no damping)
-     lamfe = 4.0 * 0.05292 * t / ep / np.log(1 + (b * tgt / ep) ** 2)  # Eq. 3.44 approximation
-
-     # print('p_s (2 surfaces + begrenzung terms) =', p_s, 'p_v = t/lambda(beta) = ', p_v, '\n')
-     # print('Volume-plasmon MFP (nm) = ', lam, ' Free-electron MFP (nm) = ', lamfe, '\n')
-     # print('--------------------------------\n')
-
-     tags['eps'] = eps
-     tags['lam'] = lam
-     tags['lamfe'] = lamfe
-     tags['p_v'] = p_v
-
-     return ssd  # /np.pi
-
-
- def effective_collection_angle(energy_scale, alpha, beta, beam_kv):
-     """
-     Translated from the original Fortran program.
-     Calculates the effective collection angle in mrad:
-     Input:
-         - energy_scale: numpy array (first and last energy loss of the spectrum in eV)
-         - alpha, beta: float (convergence and collection angle in mrad)
-         - beam_kv: float (acceleration voltage in V)
-     Output:
-         - effective collection angle in mrad
-
-     # function y = effbeta(ene, alpha, beta, beam_kv)
-     #
-     # This program computes etha(alpha, beta), that is, the collection
-     # efficiency associated with the following geometry:
-     #
-     # alpha = half angle of illumination (0 -> pi/2)
-     # beta = half angle of collection (0 -> pi/2)
-     # (pi/2 = 1570.795 mrad)
-     #
-     # A constant angular distribution of incident electrons is assumed
-     # for any incident angle (-alpha, alpha). These electrons impinge on the
-     # target and a single energy-loss event occurs, with a characteristic
-     # angle theta-e (relativistic). The angular distribution of the
-     # electrons after the target is analytically derived.
-     # This program integrates this distribution from theta=0 up to
-     # theta=beta with an adjustable angular step.
-     # This program also computes beta*, which is the theoretical
-     # collection angle that would give the same value of etha(alpha, beta)
-     # with a parallel incident beam.
-     #
-     # subroutines and function subprograms required
-     # ---------------------------------------------
-     # none
-     #
-     # comments
-     # --------
-     #
-     # The following parameters are asked as input:
-     # accelerating voltage (kV), energy loss range (eV) for the study,
-     # energy loss step (eV) in this range, alpha (mrad), beta (mrad).
-     # The program returns for each energy loss step:
-     # alpha (mrad), beta (mrad), theta-e (relativistic) (mrad),
-     # energy loss (eV), etha (#), beta* (mrad)
-     #
-     # author:
-     # -------
-     # Pierre TREBBIA
-     # US 41: "Microscopie Electronique Analytique Quantitative"
-     # Laboratoire de Physique des Solides, Bat. 510
-     # Universite Paris-Sud, F91405 ORSAY Cedex
-     # Phone: (33-1) 69 41 53 68
-     #
-     # real*8 pi, zx, zi, z1, z2, z3, z4, z5, z6, z7, x0, x1, x2, x3, x4, x5, x6, x7, x8
-     # real*8 x9, x10, theta, dtheta, eta, etha2, beta
-     # logical pr
-     # pi = 3.14159
-     # pr = .true.  ! enable record of data
-     """
-     if beam_kv == 0:
-         beam_kv = 100.0
-
-     if alpha == 0:
-         return beta
-
-     if beta == 0:
-         return alpha
-
-     z1 = beam_kv  # eV
-     z2 = energy_scale[0]
-     z3 = energy_scale[-1]
-     z4 = 100.0
-
-     z5 = alpha * 0.001  # rad
-     z6 = beta * 0.001  # rad
-     z7 = 500.0  # number of integration steps, to be modified at will
-
-     # main loop on energy loss
-     #
-     for zx in range(int(z2), int(z3), int(z4)):  # zx = current energy loss
-         eta = 0.0
-         x0 = float(zx) * (z1 + 511060.) / (z1 * (z1 + 1022120.))  # x0 = relativistic theta-e
-         x1 = np.pi / (2. * x0)
-         x2 = x0 * x0 + z5 * z5
-         x3 = z5 / x0 * z5 / x0
-         x4 = 0.1 * np.sqrt(x2)
-         dtheta = (z6 - x4) / z7
-         #
-         # calculation of the analytical expression
-         #
-         for zi in range(1, int(z7)):
-             theta = x4 + dtheta * float(zi)
-             x5 = theta * theta
-             x6 = 4. * x5 * x0 * x0
-             x7 = x2 - x5
-             x8 = np.sqrt(x7 * x7 + x6)
-             x9 = (x8 + x7) / (2. * x0 * x0)
-             x10 = 2. * theta * dtheta * np.log(x9)
-             eta = eta + x10
-
-         eta = eta + x2 / 100. * np.log(1. + x3)  # addition of the central contribution
-         x4 = z5 * z5 * np.log(1. + x1 * x1)  # normalisation
-         eta = eta / x4
-         #
-         # correction by geometrical factor (beta/alpha)**2
-         #
-         if z6 < z5:
-             x5 = z5 / z6
-             eta = eta * x5 * x5
-
-         etha2 = eta * 100.
-         #
-         # calculation of beta*
-         #
-         x6 = np.power((1. + x1 * x1), eta)
-         x7 = x0 * np.sqrt(x6 - 1.)
-         beta = x7 * 1000.  # in mrad
-
-     return beta
-
-
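The inner-loop variable x0 above is the relativistic characteristic angle theta_E = E (E0 + m0 c^2) / (E0 (E0 + 2 m0 c^2)) with m0 c^2 = 511060 eV; a quick number for a 500 eV loss at 200 kV:

    E, E0 = 500.0, 200000.0    # energy loss and beam energy in eV
    theta_e = E * (E0 + 511060.0) / (E0 * (E0 + 1022120.0))
    print(theta_e * 1000)      # about 1.45 mrad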
- def kroeger_core(e_data, a_data, eps_data, ee, thick, relativistic=True):
-     """
-     This function calculates the differential scattering probability
-     .. math::
-         \\frac{d^2P}{d \\Omega d_e}
-     of the low-loss region for total loss and volume plasmon loss
-
-     Args:
-         e_data (array): energy scale [eV]
-         a_data (array): angle or momentum range [rad]
-         eps_data (array): dielectric function data
-         ee (float): acceleration voltage [keV]
-         thick (float): thickness in m
-         relativistic: boolean, include relativistic corrections
-
-     Returns:
-         P (numpy array 2d): total loss probability
-         p_vol (numpy array 2d): volume loss probability
-     """
-
-     # $d^2P/(dEd\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im \left[ \frac{t\mu^2}{\varepsilon \phi^2 } \right]$
-
-     # ee = 200  # keV
-     # thick = 32.0  # nm
-     thick = thick * 1e-9  # input thickness now in m
-     # Define constants
-     # ec = 14.4
-     m_0 = constants.value(u'electron mass')  # REST electron mass in kg
-     # h = constants.Planck  # Planck's constant
-     hbar = constants.hbar
-
-     c = constants.speed_of_light  # speed of light m/s
-     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
-     e = constants.value(u'elementary charge')  # electron charge in Coulomb
-     print('hbar =', hbar, ' [Js] =', hbar / e, '[eV s]')
-
-     # Calculate fixed terms of the equation
-     va = 1 - (511. / (511. + ee)) ** 2  # ee is incident energy in keV
-     v = c * np.sqrt(va)
-     beta = v / c  # non-relativistic for = 1
-
-     if relativistic:
-         gamma = 1. / np.sqrt(1 - beta ** 2)
-     else:
-         gamma = 1  # set = 1 to correspond to E+B & Siegle
-
-     momentum = m_0 * v * gamma  # used for xya; E&B have no gamma
-
-     # ##### Define mapped variables
-
-     # Define independent variables E, theta
-     a_data = np.array(a_data)
-     e_data = np.array(e_data)
-     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
-     # Define CONJUGATE dielectric function variable eps
-     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
-
-     # ##### Calculate lambda in equation EB 2.3
-     theta2 = theta ** 2 + 1e-15
-     theta_e = energy * e / momentum / v
-     theta_e2 = theta_e ** 2
-
-     lambda2 = theta2 - eps * theta_e2 * beta ** 2  # Eq 2.3
-
-     lambd = np.sqrt(lambda2)
-     if (np.real(lambd) < 0).any():
-         print(' error: negative lambda')
-
-     # ##### Calculate lambda0 in equation EB 2.4
-     # According to Kröger, real(lambda0) is defined as positive!
-
-     phi2 = lambda2 + theta_e2  # Eq. 2.2
-     lambda02 = theta2 - theta_e2 * beta ** 2  # eta=1, Eq 2.4
-     lambda02[lambda02 < 0] = 0
-     lambda0 = np.sqrt(lambda02)
-     if not (np.real(lambda0) >= 0).any():
-         print(' error: negative lambda0')
-
-     de = thick * energy * e / 2.0 / hbar / v  # Eq 2.5
-
-     xya = lambd * de / theta_e  # used in Eqs 2.6, 2.7, 4.4
-
-     lplus = lambda0 * eps + lambd * np.tanh(xya)  # eta=1, Eq 2.6
-     lminus = lambda0 * eps + lambd / np.tanh(xya)  # eta=1, Eq 2.7
-
-     mue2 = 1 - (eps * beta ** 2)  # Eq. 4.5
-     phi20 = lambda02 + theta_e2  # Eq 4.6
-     phi201 = theta2 + theta_e2 * (1 - (eps + 1) * beta ** 2)  # eta=1; eps-1 in E+B Eq. (4.7)
-
-     # Eq 4.2
-     a1 = phi201 ** 2 / eps
-     a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
-     a = a1 * a2
-
-     # Eq 4.3
-     b1 = beta ** 2 * lambda0 * theta_e * phi201
-     b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
-     b = b1 * b2
-
-     # Eq 4.4
-     c1 = -beta ** 4 * lambda0 * lambd * theta_e2
-     c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
-     c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
-     c = c1 * (c2 + c3)
-
-     # Put all the pieces together...
-     p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
-
-     p_v = thick * mue2 / eps / phi2
-
-     p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2  # ASSUMES eta=1
-     p_s2 = hbar / momentum
-     p_s3 = a + b + c
-
-     p_s = p_s1 * p_s2 * p_s3
-
-     # print(p_v.min(), p_v.max(), p_s.min(), p_s.max())
-     # Calculate P and p_vol (volume only)
-     dtheta = a_data[1] - a_data[0]
-     scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
-
-     p = p_coef * np.imag(p_v - p_s)  # Eq 4.1
-     p_vol = p_coef * np.imag(p_v) * scale
-
-     # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
-     # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
-
-     p_simple = p_coef * np.imag(1 / eps) * thick / (
-         theta2 + theta_e2) * scale  # watch out: eps is the conjugated dielectric function
-
-     return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2  # , lplus_min, lminus_min
-
-
- def kroeger_core2(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True):
-     """
-     This function calculates the differential scattering probability
-     .. math::
-         \\frac{d^2P}{d \\Omega d_e}
-     of the low-loss region for total loss and volume plasmon loss
-
-     Args:
-         e_data (array): energy scale [eV]
-         a_data (array): angle or momentum range [rad]
-         eps_data (array): dielectric function
-         acceleration_voltage_kev (float): acceleration voltage [keV]
-         thickness (float): thickness in nm
-         relativistic (boolean): relativistic correction
-
-     Returns:
-         P (numpy array 2d): total loss probability
-         p_vol (numpy array 2d): volume loss probability
-
-         return P, P*scale*1e2, p_vol*1e2, p_simple*1e2
-     """
-     # $d^2P/(dEd\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im \left[ \frac{t\mu^2}{\varepsilon \phi^2 } \right]$
-     """
-     # Internally everything is calculated in SI units
-     # acceleration_voltage_kev = 200  # keV
-     # thick = 32.0 * 10**-9  # m
-     """
-     a_data = np.array(a_data)
-     e_data = np.array(e_data)
-     # adjust input to SI units
-     wavelength = get_wave_length(acceleration_voltage_kev * 1e3)  # in m
-     thickness = thickness * 1e-9  # input thickness now in m
-
-     # Define constants
-     # ec = 14.4
-     m_0 = constants.value(u'electron mass')  # REST electron mass in kg
-     # h = constants.Planck  # Planck's constant
-     hbar = constants.hbar
-
-     c = constants.speed_of_light  # speed of light m/s
-     bohr = constants.value(u'Bohr radius')  # Bohr radius in meters
-     e = constants.value(u'elementary charge')  # electron charge in Coulomb
-     # print('hbar =', hbar, ' [Js] =', hbar/e, '[eV s]')
-
-     # Calculate fixed terms of the equation
-     va = 1 - (511. / (511. + acceleration_voltage_kev)) ** 2  # acceleration_voltage_kev is incident energy in keV
-     v = c * np.sqrt(va)
-
-     if relativistic:
-         beta = v / c  # non-relativistic for = 1
-         gamma = 1. / np.sqrt(1 - beta ** 2)
-     else:
-         beta = 1
-         gamma = 1  # set = 1 to correspond to E+B & Siegle
-
-     momentum = m_0 * v * gamma  # used for xya; E&B have no gamma
-
-     # ##### Define mapped variables
-
-     # Define independent variables E, theta
-     [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
-     # Define CONJUGATE dielectric function variable eps
-     [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
-
-     # ##### Calculate lambda in equation EB 2.3
-     theta2 = theta ** 2 + 1e-15
-
-     theta_e = energy * e / momentum / v  # characteristic angle
-
-     lambda2 = theta2 - eps * theta_e ** 2 * beta ** 2  # Eq 2.3
-
-     lambd = np.sqrt(lambda2)
-     if (np.real(lambd) < 0).any():
-         print(' error: negative lambda')
-
-     # ##### Calculate lambda0 in equation EB 2.4
-     # According to Kröger, real(lambda0) is defined as positive!
-
-     phi2 = lambda2 + theta_e ** 2  # Eq. 2.2
-     lambda02 = theta2 - theta_e ** 2 * beta ** 2  # eta=1, Eq 2.4
-     lambda02[lambda02 < 0] = 0
-     lambda0 = np.sqrt(lambda02)
1523
- if not (np.real(lambda0) >= 0).any():
1524
- print(' error negative lambda0')
1525
-
1526
- de = thickness * energy * e / (2.0 * hbar * v) # Eq 2.5
1527
- xya = lambd * de / theta_e # used in Eqs 2.6, 2.7, 4.4
1528
-
1529
- lplus = lambda0 * eps + lambd * np.tanh(xya) # eta=1 %Eq 2.6
1530
- lminus = lambda0 * eps + lambd / np.tanh(xya) # eta=1 %Eq 2.7
1531
-
1532
- mue2 = 1 - (eps * beta ** 2) # Eq. 4.5
1533
- phi20 = lambda02 + theta_e ** 2 # Eq 4.6
1534
- phi201 = theta2 + theta_e ** 2 * (1 - (eps + 1) * beta ** 2) # eta=1, eps-1 in E+b Eq.(4.7)
1535
-
1536
- # Eq 4.2
1537
- a1 = phi201 ** 2 / eps
1538
- a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
1539
- a = a1 * a2
1540
-
1541
- # Eq 4.3
1542
- b1 = beta ** 2 * lambda0 * theta_e * phi201
1543
- b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
1544
- b = b1 * b2
1545
-
1546
- # Eq 4.4
1547
- c1 = -beta ** 4 * lambda0 * lambd * theta_e ** 2
1548
- c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
1549
- c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
1550
- c = c1 * (c2 + c3)
1551
-
1552
- # Put all the pieces together...
1553
- p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
1554
-
1555
- p_v = thickness * mue2 / eps / phi2
1556
-
1557
- p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2 # ASSUMES eta=1
1558
- p_s2 = hbar / momentum
1559
- p_s3 = a + b + c
1560
-
1561
- p_s = p_s1 * p_s2 * p_s3
1562
-
1563
- # print(p_v.min(),p_v.max(),p_s.min(),p_s.max())
1564
- # Calculate P and p_vol (volume only)
1565
- dtheta = a_data[1] - a_data[0]
1566
- scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
1567
-
1568
- p = p_coef * np.imag(p_v - p_s) # Eq 4.1
1569
- p_vol = p_coef * np.imag(p_v) * scale
1570
-
1571
- # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
1572
- # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
1573
-
1574
- p_simple = p_coef * np.imag(1 / eps) * thickness / (theta2 + theta_e ** 2) * scale
1575
- # Watch it: eps is conjugated dielectric function
1576
-
1577
- return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2 # ,lplus_min,lminus_min
1578
-
1579
-
1580
- ##########################
1581
- # EELS Database
1582
- ##########################
1583
-
1584
-
1585
- def read_msa(msa_string):
1586
- parameters = {}
1587
- y = []
1588
- x = []
1589
- # Read the keywords
1590
- data_section = False
1591
- msa_lines = msa_string.split('\n')
1592
-
1593
- for line in msa_lines:
1594
- if data_section is False:
1595
- if len(line) > 0:
1596
- if line[0] == "#":
1597
- try:
1598
- key, value = line.split(': ')
1599
- value = value.strip()
1600
- except ValueError:
1601
- key = line
1602
- value = None
1603
- key = key.strip('#').strip()
1604
-
1605
- if key != 'SPECTRUM':
1606
- parameters[key] = value
1607
- else:
1608
- data_section = True
1609
- else:
1610
- # Read the data
1611
-
1612
- if len(line) > 0 and line[0] != "#" and line.strip():
1613
- if parameters['DATATYPE'] == 'XY':
1614
- xy = line.replace(',', ' ').strip().split()
1615
- y.append(float(xy[1]))
1616
- x.append(float(xy[0]))
1617
- elif parameters['DATATYPE'] == 'Y':
1618
- print('y')
1619
- data = [
1620
- float(i) for i in line.replace(',', ' ').strip().split()]
1621
- y.extend(data)
1622
- parameters['data'] = np.array(y)
1623
- if 'XPERCHAN' in parameters:
1624
- parameters['enery_scale'] = np.arange(len(y)) * float(parameters['XPERCHAN']) + float(parameters['OFFSET'])
1625
- return parameters
1626
-
1627
-
1628
- def get_spectrum_eels_db(formula=None, edge=None):
1629
- """
1630
- get spectra from EELS database
1631
- chemical formula and edge is accepted.
1632
- Could expose more of the search parameters
1633
- """
1634
- valid_edges = ['K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5']
1635
- if edge is not None and edge not in valid_edges:
1636
- print('edge should be a in ', valid_edges)
1637
-
1638
- spectrum_type = None
1639
- title = None
1640
- author = None
1641
- element = None
1642
- min_energy = None
1643
- max_energy = None
1644
- resolution = None
1645
- min_energy_compare = "gt"
1646
- max_energy_compare = "lt",
1647
- resolution_compare = "lt"
1648
- max_n = -1
1649
- monochromated = None
1650
- order = None
1651
- order_direction = "ASC"
1652
- verify_certificate = True
1653
- # Verify arguments
1654
-
1655
- if spectrum_type is not None and spectrum_type not in {'coreloss', 'lowloss', 'zeroloss', 'xrayabs'}:
1656
- raise ValueError("spectrum_type must be one of \'coreloss\', \'lowloss\', "
1657
- "\'zeroloss\', \'xrayabs\'.")
1658
- # valid_edges = ['K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5']
1659
-
1660
- params = {
1661
- "type": spectrum_type,
1662
- "title": title,
1663
- "author": author,
1664
- "edge": edge,
1665
- "min_energy": min_energy,
1666
- "max_energy": max_energy,
1667
- "resolution": resolution,
1668
- "resolution_compare": resolution_compare,
1669
- "monochromated": monochromated,
1670
- "formula": formula,
1671
- "min_energy_compare": min_energy_compare,
1672
- "max_energy_compare": max_energy_compare,
1673
- "per_page": max_n,
1674
- "order": order,
1675
- "order_direction": order_direction,
1676
- }
1677
-
1678
- request = requests.get('http://api.eelsdb.eu/spectra', params=params, verify=True)
1679
- # spectra = []
1680
- jsons = request.json()
1681
- if "message" in jsons:
1682
- # Invalid query, EELSdb raises error.
1683
- raise IOError(
1684
- "Please report the following error to the HyperSpy developers: "
1685
- "%s" % jsons["message"])
1686
- reference_spectra = {}
1687
- for json_spectrum in jsons:
1688
- download_link = json_spectrum['download_link']
1689
- # print(download_link)
1690
- msa_string = requests.get(download_link, verify=verify_certificate).text
1691
- # print(msa_string[:100])
1692
- parameters = read_msa(msa_string)
1693
- if 'XPERCHAN' in parameters:
1694
- reference_spectra[parameters['TITLE']] = parameters
1695
- print(parameters['TITLE'])
1696
- print(f'found {len(reference_spectra.keys())} spectra in EELS database)')
1697
-
1698
- return reference_spectra
1
+ """
2
+ eels_tools
3
+ Model based quantification of electron energy-loss data
4
+ Copyright by Gerd Duscher
5
+
6
+ The University of Tennessee, Knoxville
7
+ Department of Materials Science & Engineering
8
+
9
+ Sources:
10
+ M. Tian et al.
11
+
12
+ Units:
13
+ everything is in SI units, except length is given in nm and angles in mrad.
14
+
15
+ Usage:
16
+ See the notebooks for examples of these routines
17
+
18
+ All input and output is handled through a dictionary that is stored in the metadata
+ attribute of the sidpy.Dataset
20
+
21
+ Update by Austin Houston, UTK 12-2023: Parallelization of spectrum images
22
+ """
23
+ import typing
24
+ from typing import Union
25
+ import numpy as np
26
+ import matplotlib.pyplot as plt
27
+
28
+ import scipy
29
+ from scipy import constants
30
+ from scipy import interpolate
31
+ from scipy.interpolate import interp1d, splrep
32
+ from scipy.signal import peak_prominences
33
+ from scipy.ndimage import gaussian_filter
34
+ from scipy.optimize import curve_fit, leastsq
35
+
36
+ import requests
37
+
38
+ # ## And we use the image tool library of pyTEMlib
39
+ from pyTEMlib.xrpa_x_sections import x_sections
40
+
41
+ import sidpy
42
+ from sidpy.proc.fitter import SidFitter
43
+ from sidpy.base.num_utils import get_slope
44
+
45
+ # we have a function called find_peaks - is it necessary?
+ # or could we just use scipy.signal.find_peaks?
47
+
48
+ major_edges = ['K1', 'L3', 'M5', 'N5']
49
+ all_edges = ['K1', 'L1', 'L2', 'L3', 'M1', 'M2', 'M3', 'M4', 'M5', 'N1', 'N2', 'N3', 'N4', 'N5', 'N6', 'N7', 'O1', 'O2',
50
+ 'O3', 'O4', 'O5', 'O6', 'O7', 'P1', 'P2', 'P3']
51
+ first_close_edges = ['K1', 'L3', 'M5', 'M3', 'N5', 'N3']
52
+
53
+ elements = [' ', 'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na',
54
+ 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V',
55
+ 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br',
56
+ 'Kr', 'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag',
57
+ 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr',
58
+ 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
59
+ 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi']
60
+
61
+
62
+ # kroeger_core(e_data,a_data,eps_data,ee,thick, relativistic =True)
63
+ # kroeger_core2(e_data,a_data,eps_data,acceleration_voltage_kev,thickness, relativistic =True)
64
+ # get_wave_length(e0)
65
+
66
+ # plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef = 4., ep= 16.8, Es = 0, IBT = [])
67
+ # drude_simulation(dset, e, ep, ew, tnm, eb)
+ # drude(energy_scale, peak_position, peak_width, gamma)
+ # drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude)
+ # zl_func(x, center1, amplitude1, width1, center2, amplitude2, width2)
71
+ # ###############################################################
72
+ # Utility Functions
73
+ # ################################################################
74
+
75
+ def get_wave_length(e0):
76
+ """get deBroglie wavelength of electron accelerated by energy (in eV) e0"""
77
+
78
+ ev = constants.e * e0
79
+ return constants.h / np.sqrt(2 * constants.m_e * ev * (1 + ev / (2 * constants.m_e * constants.c ** 2)))
80
+
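+ # Minimal usage sketch (approximate values, assuming e0 is the acceleration energy in eV as above):
+ # get_wave_length(200000) # -> ~2.51e-12 m (2.51 pm at 200 kV)
+ # get_wave_length(100000) # -> ~3.70e-12 m (3.70 pm at 100 kV)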
81
+
82
+ def effective_collection_angle(energy_scale, alpha, beta, beam_kv):
83
+ """Calculates the effective collection angle in mrad:
84
+
85
+ Translate from original Fortran program
86
+ Calculates the effective collection angle in mrad:
87
+ Parameter
88
+ ---------
89
+ energy_scale: numpy array
90
+ first and last energy loss of spectrum in eV
91
+ alpha: float
92
+ convergence angle in mrad
93
+ beta: float
94
+ collection angle in mrad
95
+ beam_kv: float
96
+ acceleration voltage in V
97
+
98
+ Returns
99
+ -------
100
+ eff_beta: float
101
+ effective collection angle in mrad
102
+
103
+ # function y = effbeta(ene, alpha, beta, beam_kv)
104
+ #
105
+ # This program computes etha(alpha,beta), that is the collection
106
+ # efficiency associated to the following geometry :
107
+ #
108
+ # alpha = half angle of illumination (0 -> pi/2)
109
+ # beta = half angle of collection (0 -> pi/2)
110
+ # (pi/2 = 1570.795 mrad)
111
+ #
112
+ # A constant angular distribution of incident electrons is assumed
113
+ # for any incident angle (-alpha,alpha). These electrons imping the
114
+ # target and a single energy-loss event occurs, with a characteristic
115
+ # angle theta-e (relativistic). The angular distribution of the
116
+ # electrons after the target is analytically derived.
117
+ # This program integrates this distribution from theta=0 up to
118
+ # theta=beta with an adjustable angular step.
119
+ # This program also computes beta* which is the theoretical
120
+ # collection angle which would give the same value of etha(alpha,beta)
121
+ # with a parallel incident beam.
122
+ #
123
+ # subroutines and function subprograms required
124
+ # ---------------------------------------------
125
+ # none
126
+ #
127
+ # comments
128
+ # --------
129
+ #
130
+ # The following parameters are asked as input :
131
+ # accelerating voltage (kV), energy loss range (eV) for the study,
132
+ # energy loss step (eV) in this range, alpha (mrad), beta (mrad).
133
+ # The program returns for each energy loss step :
134
+ # alpha (mrad), beta (mrad), theta-e (relativistic) (mrad),
135
+ # energy loss (eV), etha (#), beta * (mrad)
136
+ #
137
+ # author :
138
+ # --------
139
+ # Pierre TREBBIA
140
+ # US 41 : "Microscopie Electronique Analytique Quantitative"
141
+ # Laboratoire de Physique des Solides, Bat. 510
142
+ # Universite Paris-Sud, F91405 ORSAY Cedex
143
+ # Phone : (33-1) 69 41 53 68
144
+ #
145
+ """
146
+ if beam_kv == 0:
147
+ beam_kv = 100.0
148
+
149
+ if alpha == 0:
150
+ return beta
151
+
152
+ if beta == 0:
153
+ return alpha
154
+
155
+ z1 = beam_kv # eV
156
+ z2 = energy_scale[0]
157
+ z3 = energy_scale[-1]
158
+ z4 = 100.0
159
+
160
+ z5 = alpha * 0.001 # rad
161
+ z6 = beta * 0.001 # rad
162
+ z7 = 500.0 # number of integration steps to be modified at will
163
+
164
+ # main loop on energy loss
165
+ #
166
+ for zx in range(int(z2), int(z3), int(z4)): # ! zx = current energy loss
167
+ eta = 0.0
168
+ x0 = float(zx) * (z1 + 511060.) / (z1 * (z1 + 1022120.)) # x0 = relativistic theta-e
169
+ x1 = np.pi / (2. * x0)
170
+ x2 = x0 * x0 + z5 * z5
171
+ x3 = z5 / x0 * z5 / x0
172
+ x4 = 0.1 * np.sqrt(x2)
173
+ dtheta = (z6 - x4) / z7
174
+ #
175
+ # calculation of the analytical expression
176
+ #
177
+ for zi in range(1, int(z7)):
178
+ theta = x4 + dtheta * float(zi)
179
+ x5 = theta * theta
180
+ x6 = 4. * x5 * x0 * x0
181
+ x7 = x2 - x5
182
+ x8 = np.sqrt(x7 * x7 + x6)
183
+ x9 = (x8 + x7) / (2. * x0 * x0)
184
+ x10 = 2. * theta * dtheta * np.log(x9)
185
+ eta = eta + x10
186
+
187
+ eta = eta + x2 / 100. * np.log(1. + x3) # addition of the central contribution
188
+ x4 = z5 * z5 * np.log(1. + x1 * x1) # normalisation
189
+ eta = eta / x4
190
+ #
191
+ # correction by geometrical factor (beta/alpha)**2
192
+ #
193
+ if z6 < z5:
194
+ x5 = z5 / z6
195
+ eta = eta * x5 * x5
196
+
197
+ etha2 = eta * 100.
198
+ #
199
+ # calculation of beta *
200
+ #
201
+ x6 = np.power((1. + x1 * x1), eta)
202
+ x7 = x0 * np.sqrt(x6 - 1.)
203
+ beta = x7 * 1000. # in mrad
204
+
205
+ return beta
206
+
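+ # Usage sketch with illustrative values (energy scale in eV, angles in mrad, voltage in V):
+ # energy_scale = np.linspace(100., 1000., 1024)
+ # eff_beta = effective_collection_angle(energy_scale, alpha=30., beta=50., beam_kv=200000.)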
207
+
208
+ def set_default_metadata(current_dataset: sidpy.Dataset) -> None:
209
+
210
+ if 'experiment' not in current_dataset.metadata:
211
+ current_dataset.metadata['experiment'] = {}
212
+ if 'convergence_angle' not in current_dataset.metadata['experiment']:
213
+ current_dataset.metadata['experiment']['convergence_angle'] = 30
214
+ if 'collection_angle' not in current_dataset.metadata['experiment']:
215
+ current_dataset.metadata['experiment']['collection_angle'] = 50
216
+ if 'acceleration_voltage' not in current_dataset.metadata['experiment']:
217
+ current_dataset.metadata['experiment']['acceleration_voltage'] = 200000
218
+
219
+ ###
220
+
221
+ # ###############################################################
222
+ # Peak Fit Functions
223
+ # ################################################################
224
+
225
+
226
+ def residuals_smooth(p, x, y, only_positive_intensity):
227
+ """part of fit"""
228
+
229
+ err = (y - model_smooth(x, p, only_positive_intensity))
230
+ return err
231
+
232
+
233
+ def model_smooth(x, p, only_positive_intensity=False):
234
+ """part of fit"""
235
+
236
+ y = np.zeros(len(x))
237
+
238
+ number_of_peaks = int(len(p) / 3)
239
+ for i in range(number_of_peaks):
240
+ if only_positive_intensity:
241
+ p[i * 3 + 1] = abs(p[i * 3 + 1])
242
+ p[i * 3 + 2] = abs(p[i * 3 + 2])
243
+ if p[i * 3 + 2] > abs(p[i * 3]) * 4.29193 / 2.0:
244
+ p[i * 3 + 2] = abs(p[i * 3]) * 4.29193 / 2. # ## width cannot extend beyond zero, maximum is FWTM/2
245
+
246
+ y = y + gauss(x, p[i * 3:])
247
+
248
+ return y
249
+
250
+
251
+ def gauss(x, p): # p[0]==mean, p[1]= amplitude p[2]==fwhm,
252
+ """Gaussian Function
253
+
254
+ p[0] == mean, p[1] == amplitude, p[2] == FWHM
+ area = np.sqrt(2 * np.pi) * p[1] * np.abs(p[2] / 2.3548)
+ FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
+ sigma = FWHM / 2.3548
258
+ """
259
+ if p[2] == 0:
260
+ return x * 0.
261
+ else:
262
+ return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
263
+
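+ # Example: a unit-amplitude Gaussian centered at 0 eV with an FWHM of 1 eV;
+ # its area is np.sqrt(2 * np.pi) * 1.0 * (1.0 / 2.3548), roughly 1.06:
+ # x = np.linspace(-3., 3., 601)
+ # y = gauss(x, [0.0, 1.0, 1.0])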
264
+
265
+ def lorentz(x, center, amplitude, width):
266
+ """ Lorentzian Function """
267
+ lorentz_peak = 0.5 * width / np.pi / ((x - center) ** 2 + (width / 2) ** 2)
268
+ return amplitude * lorentz_peak / lorentz_peak.max()
269
+
270
+
271
+ def zl_func(x, center1, amplitude1, width1, center2, amplitude2, width2):
272
+ """ zero loss function as product of two lorentzians """
273
+ return lorentz(x, center1, amplitude1, width1) * lorentz(x, center2, amplitude2, width2)
274
+
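+ # Sketch: a zero-loss model as the product of two Lorentzians, both centered
+ # near 0 eV (parameter values here are purely illustrative):
+ # zlp = zl_func(x, 0.0, 1.0, 0.3, 0.0, 1.0, 0.5)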
275
+
276
+ def zl(x, p, p_zl):
277
+ """zero-loss function"""
278
+ p_zl_local = p_zl.copy()
279
+ p_zl_local[0] += p[0]
+ p_zl_local[3] += p[0]
+ zero_loss = zl_func(x, *p_zl_local) # shift both Lorentzian centers by p[0]
282
+ return p[1] * zero_loss / zero_loss.max()
283
+
284
+
285
+ def get_channel_zero(spectrum: np.ndarray, energy: np.ndarray, width: int = 8):
286
+ """Determin shift of energy scale according to zero-loss peak position
287
+
288
+ This function assumes that the zero loss peak is the maximum of the spectrum.
289
+ """
290
+
291
+ zero = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
292
+ width = int(width/2)
293
+ x = np.array(energy[int(zero-width):int(zero+width)])
294
+ y = np.array(spectrum[int(zero-width):int(zero+width)]).copy()
295
+
296
+ y[np.nonzero(y <= 0)] = 1e-12
297
+
298
+ p0 = [energy[zero], spectrum.max(), .5] # Initial guess is a normal distribution
299
+
300
+ def errfunc(pp, xx, yy):
301
+ return (gauss(xx, pp) - yy) / np.sqrt(yy) # Distance to the target function
302
+
303
+ [p1, _] = leastsq(errfunc, np.array(p0[:]), args=(x, y))
304
+ fit_mu, amplitude, fwhm = p1 # gauss parameters: mean, amplitude, FWHM
305
+
306
+ return fwhm, fit_mu
307
+
308
+
309
+ def get_zero_loss_energy(dataset):
310
+
311
+ spectrum = dataset.sum(axis=tuple(range(dataset.ndim - 1)))
312
+
313
+ startx = scipy.signal.find_peaks(spectrum/np.max(spectrum), height=0.98)[0][0]
314
+
315
+ end = startx + 3
316
+ start = startx - 3
317
+ for i in range(10):
318
+ if spectrum[startx - i] < 0.3 * spectrum[startx]:
319
+ start = startx - i
320
+ if spectrum[startx + i] < 0.3 * spectrum[startx]:
321
+ end = startx + i
322
+ if end - start < 3:
323
+ end = startx + 2
324
+ start = startx - 2
325
+ width = int((end-start)/2+0.5)
326
+
327
+ energy = dataset.get_spectral_dims(return_axis=True)[0].values
328
+
329
+ if dataset.ndim == 1: # single spectrum
330
+ _, shifts = get_channel_zero(np.array(dataset), energy, width)
331
+ shifts = np.array([shifts])
332
+ elif dataset.ndim == 2: # line scan
333
+ shifts = np.zeros(dataset.shape[:1])
334
+ for x in range(dataset.shape[0]):
335
+ _, shifts[x] = get_channel_zero(dataset[x, :], energy, width)
336
+ elif dataset.ndim == 3: # spectral image
337
+ shifts = np.zeros(dataset.shape[:2])
338
+ for x in range(dataset.shape[0]):
339
+ for y in range(dataset.shape[1]):
340
+ _, shifts[x, y] = get_channel_zero(dataset[x, y, :], energy, width)
341
+ return shifts
342
+
343
+
344
+ def shift_energy(dataset: sidpy.Dataset, shifts: np.ndarray) -> sidpy.Dataset:
345
+ """ Align zero-loss peaks of any spectral sidpy dataset """
346
+
347
+ new_si = dataset.copy()
348
+ new_si *= 0.0
349
+
350
+ image_dims = dataset.get_image_dims()
351
+ if len(image_dims) == 0:
352
+ image_dims = [0]
353
+ if len(image_dims) != shifts.ndim:
354
+ raise TypeError('array of energy shifts has to have the same dimension as the dataset')
355
+ if not isinstance(dataset, sidpy.Dataset):
356
+ raise TypeError('This function needs a sidpy Dataset to shift energy scale')
357
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
358
+ if dataset.ndim == 1: # single spectrum
359
+ tck = interpolate.splrep(np.array(energy_scale - shifts), np.array(dataset), k=1, s=0)
360
+ new_si[:] = interpolate.splev(energy_scale, tck, der=0)
361
+ new_si.data_type = 'Spectrum'
362
+ elif dataset.ndim == 2: # line scan
363
+ for x in range(dataset.shape[0]):
364
+ tck = interpolate.splrep(np.array(energy_scale - shifts[x]), np.array(dataset[x, :]), k=1, s=0)
365
+ new_si[x, :] = interpolate.splev(energy_scale, tck, der=0)
366
+ elif dataset.ndim == 3: # spectral image
367
+ for x in range(dataset.shape[0]):
368
+ for y in range(dataset.shape[1]):
369
+ tck = interpolate.splrep(np.array(energy_scale - shifts[x, y]), np.array(dataset[x, y]), k=1, s=0)
370
+ new_si[x, y, :] = interpolate.splev(energy_scale, tck, der=0)
371
+
372
+ return new_si
373
+
374
+
375
+ def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
376
+ """
377
+ Shifts the energy axis of the input dataset to be aligned with the zero-loss peak.
378
+
379
+ Parameters:
380
+ -----------
381
+ dataset : sidpy.Dataset
382
+ The input dataset containing the energy axis to be aligned.
383
+
384
+ Returns:
385
+ --------
386
+ sidpy.Dataset
387
+ The dataset with the energy axis shifted to align the zero-loss peak.
388
+
389
+ """
390
+ shifts = get_zero_loss_energy(dataset)
391
+ # print(shifts, dataset)
392
+ new_si = shift_energy(dataset, shifts)
393
+ new_si.metadata.update({'zero_loss': {'shifted': shifts}})
394
+ return new_si
395
+
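+ # Usage sketch ('low_loss' is a hypothetical low-loss sidpy.Dataset):
+ # aligned = align_zero_loss(low_loss)
+ # aligned.metadata['zero_loss']['shifted'] # per-spectrum energy shifts in eV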
396
+
397
+
398
+
399
+ def get_resolution_functions(dataset: sidpy.Dataset, startFitEnergy: float=-1, endFitEnergy: float=+1,
400
+ n_workers: int=1, n_threads: int=8):
401
+ """
402
+ Analyze and fit low-loss EELS data within a specified energy range to determine zero-loss peaks.
403
+
404
+ This function processes a low-loss EELS dataset from transmission electron microscopy (TEM) data,
405
+ focusing on a specified energy range for analyzing and fitting the spectrum.
406
+ It determines fitting parameters and applies these to extract zero-loss peak information
407
+ from the dataset. The function handles 1D, 2D, and 3D datasets.
408
+
409
+ Parameters:
410
+ -----------
411
+ dataset (sidpy.Dataset): The dataset containing TEM spectral data.
412
+ startFitEnergy (float): The start energy of the fitting window.
413
+ endFitEnergy (float): The end energy of the fitting window.
414
+ n_workers (int, optional): The number of workers for parallel processing (default is 1).
415
+ n_threads (int, optional): The number of threads for parallel processing (default is 8).
416
+
417
+ Returns:
418
+ --------
419
+ tuple: A tuple containing:
420
+ - z_loss_dset (sidpy.Dataset): The dataset with added zero-loss peak information.
421
+ - z_loss_params (numpy.ndarray): Array of parameters used for the zero-loss peak fitting.
422
+
423
+ Raises:
424
+ -------
425
+ ValueError: If the input dataset does not have the expected dimensions or format.
426
+
427
+ Notes:
428
+ ------
429
+ - The function expects `dset` to have specific dimensionalities and will raise an error if they are not met.
430
+ - Parallel processing is employed to enhance performance, particularly for large datasets.
431
+ """
432
+ energy = dataset.get_spectral_dims(return_axis=True)[0].values
433
+ start_fit_pixel = np.searchsorted(energy, startFitEnergy)
434
+ end_fit_pixel = np.searchsorted(energy, endFitEnergy)
435
+ guess_width = (endFitEnergy - startFitEnergy)/2
436
+ if end_fit_pixel - start_fit_pixel < 5:
437
+ start_fit_pixel -= 2
438
+ end_fit_pixel += 2
439
+
440
+ def get_good_guess(zl_func, energy, spectrum):
441
+ popt, pcov = curve_fit(zl_func, energy, spectrum,
442
+ p0=[0, guess_amplitude, guess_width,
443
+ 0, guess_amplitude, guess_width])
444
+ return popt
445
+
446
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
447
+ # get a good guess for the fit parameters
448
+ if len(dataset.shape) == 3:
449
+ fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
450
+ guess_amplitude = np.sqrt(fit_dset.max())
451
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=(0, 1))/fit_dset.shape[0]/fit_dset.shape[1])
452
+ elif len(dataset.shape) == 2:
453
+ fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
454
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
455
+ guess_amplitude = np.sqrt(fit_dset.max())
456
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset.sum(axis=0)/fit_dset.shape[0])
457
+ elif len(dataset.shape) == 1:
458
+ fit_dset = dataset[start_fit_pixel:end_fit_pixel]
459
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
460
+ guess_amplitude = np.sqrt(fit_dset.max())
461
+ guess_params = get_good_guess(zl_func, fit_energy, fit_dset)
462
+ z_loss_dset = dataset.copy()
463
+ z_loss_dset *= 0.0
464
+ z_loss_dset += zl_func(energy, *guess_params)
465
+ if 'zero_loss' not in z_loss_dset.metadata:
466
+ z_loss_dset.metadata['zero_loss'] = {}
467
+ z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
468
+ 'endFitEnergy': endFitEnergy,
469
+ 'fit_parameter': guess_params,
470
+ 'original_low_loss': dataset.title})
471
+ return z_loss_dset
472
+ else:
473
+ print('Error: need a spectrum or spectral image sidpy dataset')
474
+ print('Not dset.shape = ', dataset.shape)
475
+ return None
476
+
477
+ # define guess function for SidFitter
478
+ def guess_function(xvec, yvec):
479
+ return guess_params
480
+
481
+ # apply to all spectra
482
+ zero_loss_fitter = SidFitter(fit_dset, zl_func, num_workers=n_workers, guess_fn=guess_function, threads=n_threads,
483
+ return_cov=False, return_fit=False, return_std=False, km_guess=False, num_fit_parms=6)
484
+
485
+ [z_loss_params] = zero_loss_fitter.do_fit()
486
+ z_loss_dset = dataset.copy()
487
+ z_loss_dset *= 0.0
488
+
489
+ energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
490
+ z_loss_dset.shape[1], energy.shape[0]))
491
+ z_loss_peaks = zl_func(energy_grid, *z_loss_params)
492
+ z_loss_dset += z_loss_peaks
493
+
494
+ shifts = z_loss_params[:, :, 0] * z_loss_params[:, :, 3]
495
+ widths = z_loss_params[:, :, 2] * z_loss_params[:, :, 5]
496
+
497
+ z_loss_dset.metadata.setdefault('zero_loss', {})
+ z_loss_dset.metadata['zero_loss'].update({'startFitEnergy': startFitEnergy,
+ 'endFitEnergy': endFitEnergy,
+ 'fit_parameter': z_loss_params,
+ 'original_low_loss': dataset.title})
501
+
502
+
503
+ return z_loss_dset
504
+
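+ # Usage sketch: fit the zero-loss peak within +/- 0.5 eV of zero
+ # ('low_loss' is a hypothetical low-loss sidpy.Dataset):
+ # z_loss = get_resolution_functions(low_loss, startFitEnergy=-0.5, endFitEnergy=0.5)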
505
+
506
+ def drude(energy_scale, peak_position, peak_width, gamma):
507
+ """dielectric function according to Drude theory"""
508
+
509
+ eps = (1 - (peak_position ** 2 - peak_width * energy_scale * 1j) /
510
+ (energy_scale ** 2 + 2 * energy_scale * gamma * 1j)) # Mod drude term
511
+ return eps
512
+
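+ # Worked example: the energy-loss function Im(-1/eps) of this Drude dielectric
+ # function peaks close to the plasmon energy (16.8 eV is an illustrative value):
+ # e = np.linspace(1., 40., 400)
+ # eps = drude(e, 16.8, 1.0, 0.1)
+ # elf = np.imag(-1. / eps)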
513
+
514
+ def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
515
+ """dielectric function according to Drude-Lorentz theory"""
516
+
517
+ eps = eps_inf
518
+ for i in range(leng):
519
+ eps = eps + amplitude[i] * (1 / (e + ep[i] + gamma[i] * 1j) - 1 / (e - ep[i] + gamma[i] * 1j))
520
+ return eps
521
+
522
+
523
+ def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, plot_result: bool = False, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
524
+ """
525
+ Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
526
+
527
+ This function applies the Drude model to fit plasmon peaks in a dataset obtained
528
+ from transmission electron microscopy (TEM). It processes the dataset to determine
529
+ peak positions, widths, and amplitudes within a specified energy range. The function
530
+ can handle datasets with different dimensions and offers parallel processing capabilities.
531
+
532
+ Parameters:
533
+ dataset: sidpy.Dataset or numpy.ndarray
534
+ The dataset containing TEM spectral data.
535
+ startFitEnergy: float
536
+ The start energy of the fitting window.
537
+ endFitEnergy: float
538
+ The end energy of the fitting window.
539
+ plot_result: bool, optional
540
+ If True, plots the fitting results (default is False).
541
+ number_workers: int, optional
542
+ The number of workers for parallel processing (default is 4).
543
+ number_threads: int, optional
544
+ The number of threads for parallel processing (default is 8).
545
+
546
+ Returns:
547
+ fitted_dataset: sidpy.Dataset or numpy.ndarray
548
+ The dataset with fitted plasmon peak parameters. The dimensions and
549
+ format depend on the input dataset.
550
+
551
+ Raises:
552
+ ValueError: If the input dataset does not have the expected dimensions or format.
553
+
554
+ Notes:
555
+ - The function uses the Drude model to fit plasmon peaks.
556
+ - The fitting parameters are peak position (Ep), peak width (Ew), and amplitude (A).
557
+ - If `plot_result` is True, the function plots Ep, Ew, and A as separate subplots.
558
+ """
559
+ # define Drude function for plasmon fitting
560
+ def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
561
+ E = E/E.max()
562
+ eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
563
+ elf = (-1/eps).imag
564
+ return A*elf
565
+
566
+ # define window for fitting
567
+ energy = dataset.get_spectral_dims(return_axis=True)[0].values
568
+ start_fit_pixel = np.searchsorted(energy, startFitEnergy)
569
+ end_fit_pixel = np.searchsorted(energy, endFitEnergy)
570
+
571
+ # rechunk dataset
572
+ if dataset.ndim == 3:
573
+ dataset = dataset.rechunk(chunks=(1, 1, -1))
574
+ fit_dset = dataset[:, :, start_fit_pixel:end_fit_pixel]
575
+ elif dataset.ndim == 2:
576
+ dataset = dataset.rechunk(chunks=(1, -1))
577
+ fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
578
+ else:
579
+ fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
580
+ fit_energy = energy[start_fit_pixel:end_fit_pixel]
+ guess_pos = fit_energy[np.argmax(fit_dset)]
+ guess_amplitude = fit_dset.max()
+ guess_width = (endFitEnergy - startFitEnergy)/2
+ popt, pcov = curve_fit(energy_loss_function, fit_energy, fit_dset,
+ p0=[guess_pos, guess_width, guess_amplitude])
+ return popt
586
+
587
+ # if it can be parallelized:
588
+ fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
589
+ threads=number_threads, return_cov=False, return_fit=False, return_std=False,
590
+ km_guess=False, num_fit_parms=3)
591
+ [fitted_dataset] = fitter.do_fit()
592
+
593
+ if plot_result:
594
+ fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=True, sharey=True)
595
+ ax1.imshow(fitted_dataset[:, :, 0], cmap='jet')
596
+ ax1.set_title('Ep - Peak Position')
597
+ ax2.imshow(fitted_dataset[:, :, 1], cmap='jet')
598
+ ax2.set_title('Ew - Peak Width')
599
+ ax3.imshow(fitted_dataset[:, :, 2], cmap='jet')
600
+ ax3.set_title('A - Amplitude')
601
+ plt.show()
602
+ return fitted_dataset
603
+
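+ # Usage sketch (fit window values are illustrative):
+ # plasmon_fit = fit_plasmon(dataset, startFitEnergy=10., endFitEnergy=25., plot_result=True)
+ # for a spectrum image this returns maps of Ep (position), Ew (width) and A (amplitude)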
604
+
605
+
606
+ def drude_simulation(dset, e, ep, ew, tnm, eb):
607
+ """probabilities of dielectric function eps relative to zero-loss integral (i0 = 1)
608
+
609
+ Gives probabilities of dielectric function eps relative to zero-loss integral (i0 = 1) per eV
610
+ Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
611
+
612
+ # Given the plasmon energy (ep), plasmon fwhm (ew) and binding energy(eb),
613
+ # this program generates:
614
+ # EPS1, EPS2 from modified Eq. (3.40), ELF=Im(-1/EPS) from Eq. (3.42),
615
+ # single scattering from Eq. (4.26) and SRFINT from Eq. (4.31)
616
+ # The output is e, ssd into the file drude.ssd (for use in Flog etc.)
617
+ # and e,eps1 ,eps2 into drude.eps (for use in Kroeger etc.)
618
+ # Gives probabilities relative to zero-loss integral (i0 = 1) per eV
619
+ # Details in R.F.Egerton: EELS in the Electron Microscope, 3rd edition, Springer 2011
620
+ # Version 10.11.26
621
+
622
+ """
623
+ energy_scale = dset.get_spectral_dims(return_axis=True)[0].values
624
+
625
+ epc = energy_scale[1] - energy_scale[0] # input('ev per channel : ');
626
+
627
+ b = dset.metadata['collection_angle'] / 1000. # rad
+ e0 = dset.metadata['acceleration_voltage'] / 1000. # incident energy e0 (keV)
630
+
631
+ # effective kinetic energy: T = m_o v^2/2,
632
+ t = 1000.0 * e0 * (1. + e0 / 1022.12) / (1.0 + e0 / 511.06) ** 2 # eV # equ.5.2a or Appendix E p 427
633
+
634
+ # 2 gamma T
635
+ tgt = 1000 * e0 * (1022.12 + e0) / (511.06 + e0) # eV Appendix E p 427
636
+
637
+ rk0 = 2590 * (1.0 + e0 / 511.06) * np.sqrt(2.0 * t / 511060)
638
+
639
+ os = e[0]
640
+ ew_mod = eb
641
+ tags = dset.metadata
642
+
643
+ eps = 1 - (ep ** 2 - ew_mod * e * 1j) / (e ** 2 + 2 * e * ew * 1j) # Mod drude term
644
+
645
+ eps[np.nonzero(eps == 0.0)] = 1e-19
646
+ elf = np.imag(-1 / eps)
647
+
648
+ the = e / tgt # varies with energy loss! # Appendix E p 427
649
+ # srfelf = 4..*eps2./((1+eps1).^2+eps2.^2) - elf; %equivalent
650
+ srfelf = np.imag(-4. / (1.0 + eps)) - elf # for 2 surfaces
651
+ angdep = np.arctan(b / the) / the - b / (b * b + the * the)
652
+ srfint = angdep * srfelf / (3.1416 * 0.05292 * rk0 * t) # probability per eV
653
+ anglog = np.log(1.0 + b * b / the / the)
654
+ i0 = dset.sum() # *tags['counts2e']
655
+
656
+ # 2 * t = m_0 v**2 !!! a_0 = 0.05292 nm
657
+ volint = abs(tnm / (np.pi * 0.05292 * t * 2.0) * elf * anglog) # S equ 4.26% probability per eV
658
+ volint = volint * i0 / epc # S probability per channel
659
+ ssd = volint # + srfint;
660
+
661
+ if e[0] < -1.0:
662
+ xs = int(abs(-e[0] / epc))
663
+
664
+ ssd[0:xs] = 0.0
665
+ volint[0:xs] = 0.0
666
+ srfint[0:xs] = 0.0
667
+
668
+ # if os <0:
669
+ p_s = np.trapz(e, srfint) # 2 surfaces but includes negative Begrenzung contribution.
670
+ # p_v = abs(np.trapz(e, abs(volint / i0))) # integrated volume probability (superseded below)
+ p_v = (volint / i0).sum() # our data have the same epc, and the trapezoid formula does not include it
672
+ lam = tnm / p_v # does NOT depend on free-electron approximation (no damping).
673
+ lamfe = 4.0 * 0.05292 * t / ep / np.log(1 + (b * tgt / ep) ** 2) # Eq.(3.44) approximation
674
+
675
+ tags['eps'] = eps
676
+ tags['lam'] = lam
677
+ tags['lamfe'] = lamfe
678
+ tags['p_v'] = p_v
679
+
680
+ return ssd # /np.pi
681
+
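+ # Usage sketch (illustrative values; dset.metadata must contain 'collection_angle'
+ # and 'acceleration_voltage' as used above):
+ # ssd = drude_simulation(dset, e=energy_scale, ep=16.8, ew=2.0, tnm=50., eb=1.0)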
682
+
683
+ def kroeger_core(e_data, a_data, eps_data, acceleration_voltage_kev, thickness, relativistic=True):
684
+ """This function calculates the differential scattering probability
685
+
686
+ .. math::
687
+ \\frac{d^2P}{d \\Omega d_e}
688
+ of the low-loss region for total loss and volume plasmon loss
689
+
690
+ Args:
691
+ e_data (array): energy scale [eV]
692
+ a_data (array): angle or momentum range [rad]
693
+ eps_data (array) dielectric function
694
+ acceleration_voltage_kev (float): acceleration voltage [keV]
695
+ thickness (float): thickness in nm
696
+ relativistic (boolean): relativistic correction
697
+
698
+ Returns:
699
+ P (numpy array 2d): total loss probability
700
+ p_vol (numpy array 2d): volume loss probability
701
+
702
+ return P, P*scale*1e2,p_vol*1e2, p_simple*1e2
703
+ """
704
+
705
+ # $d^2P/(dEd\Omega) = \frac{1}{\pi^2 a_0 m_0 v^2} \Im \left[ \frac{t\mu^2}{\varepsilon \phi^2 } \right]
706
+ """
707
+ # Internally everything is calculated in si units
708
+ # acceleration_voltage_kev = 200 #keV
709
+ # thick = 32.0*10-9 # m
710
+
711
+ """
712
+ a_data = np.array(a_data)
713
+ e_data = np.array(e_data)
714
+ # adjust input to si units
715
+ wavelength = get_wave_length(acceleration_voltage_kev * 1e3) # in m
716
+ thickness = thickness * 1e-9 # input thickness now in m
717
+
718
+ # Define constants
719
+ # ec = 14.4;
720
+ m_0 = constants.value(u'electron mass') # REST electron mass in kg
721
+ # h = constants.Planck # Planck's constant
722
+ hbar = constants.hbar
723
+
724
+ c = constants.speed_of_light # speed of light m/s
725
+ bohr = constants.value(u'Bohr radius') # Bohr radius in meters
726
+ e = constants.value(u'elementary charge') # electron charge in Coulomb
727
+ # print('hbar =', hbar ,' [Js] =', hbar/e ,'[ eV s]')
728
+
729
+ # Calculate fixed terms of equation
730
+ va = 1 - (511. / (511. + acceleration_voltage_kev)) ** 2 # acceleration_voltage_kev is incident energy in keV
731
+ v = c * np.sqrt(va)
732
+
733
+ if relativistic:
734
+ beta = v / c # non-relativistic for =1
735
+ gamma = 1. / np.sqrt(1 - beta ** 2)
736
+ else:
737
+ beta = 1
738
+ gamma = 1 # set = 1 to correspond to E+B & Siegle
739
+
740
+ momentum = m_0 * v * gamma # used for xya, E&B have no gamma
741
+
742
+ # ##### Define mapped variables
743
+
744
+ # Define independent variables E, theta
745
+ [energy, theta] = np.meshgrid(e_data + 1e-12, a_data)
746
+ # Define CONJUGATE dielectric function variable eps
747
+ [eps, _] = np.meshgrid(np.conj(eps_data), a_data)
748
+
749
+ # ##### Calculate lambda in equation EB 2.3
750
+ theta2 = theta ** 2 + 1e-15
751
+
752
+ theta_e = energy * e / momentum / v # critical angle
753
+
754
+ lambda2 = theta2 - eps * theta_e ** 2 * beta ** 2 # Eq 2.3
755
+
756
+ lambd = np.sqrt(lambda2)
757
+ if (np.real(lambd) < 0).any():
758
+ print(' error negative lambda')
759
+
760
+ # ##### Calculate lambda0 in equation EB 2.4
761
+ # According to Kröger real(lambda0) is defined as positive!
762
+
763
+ phi2 = lambda2 + theta_e ** 2 # Eq. 2.2
764
+ lambda02 = theta2 - theta_e ** 2 * beta ** 2 # eta=1 Eq 2.4
765
+ lambda02[lambda02 < 0] = 0
766
+ lambda0 = np.sqrt(lambda02)
767
+ if not (np.real(lambda0) >= 0).any():
768
+ print(' error negative lambda0')
769
+
770
+ de = thickness * energy * e / (2.0 * hbar * v) # Eq 2.5
771
+ xya = lambd * de / theta_e # used in Eqs 2.6, 2.7, 4.4
772
+
773
+ lplus = lambda0 * eps + lambd * np.tanh(xya) # eta=1 %Eq 2.6
774
+ lminus = lambda0 * eps + lambd / np.tanh(xya) # eta=1 %Eq 2.7
775
+
776
+ mue2 = 1 - (eps * beta ** 2) # Eq. 4.5
777
+ phi20 = lambda02 + theta_e ** 2 # Eq 4.6
778
+ phi201 = theta2 + theta_e ** 2 * (1 - (eps + 1) * beta ** 2) # eta=1, eps-1 in E+b Eq.(4.7)
779
+
780
+ # Eq 4.2
781
+ a1 = phi201 ** 2 / eps
782
+ a2 = np.sin(de) ** 2 / lplus + np.cos(de) ** 2 / lminus
783
+ a = a1 * a2
784
+
785
+ # Eq 4.3
786
+ b1 = beta ** 2 * lambda0 * theta_e * phi201
787
+ b2 = (1. / lplus - 1. / lminus) * np.sin(2. * de)
788
+ b = b1 * b2
789
+
790
+ # Eq 4.4
791
+ c1 = -beta ** 4 * lambda0 * lambd * theta_e ** 2
792
+ c2 = np.cos(de) ** 2 * np.tanh(xya) / lplus
793
+ c3 = np.sin(de) ** 2 / np.tanh(xya) / lminus
794
+ c = c1 * (c2 + c3)
795
+
796
+ # Put all the pieces together...
797
+ p_coef = e / (bohr * np.pi ** 2 * m_0 * v ** 2)
798
+
799
+ p_v = thickness * mue2 / eps / phi2
800
+
801
+ p_s1 = 2. * theta2 * (eps - 1) ** 2 / phi20 ** 2 / phi2 ** 2 # ASSUMES eta=1
802
+ p_s2 = hbar / momentum
803
+ p_s3 = a + b + c
804
+
805
+ p_s = p_s1 * p_s2 * p_s3
806
+
807
+ # print(p_v.min(),p_v.max(),p_s.min(),p_s.max())
808
+ # Calculate P and p_vol (volume only)
809
+ dtheta = a_data[1] - a_data[0]
810
+ scale = np.sin(np.abs(theta)) * dtheta * 2 * np.pi
811
+
812
+ p = p_coef * np.imag(p_v - p_s) # Eq 4.1
813
+ p_vol = p_coef * np.imag(p_v) * scale
814
+
815
+ # lplus_min = e_data[np.argmin(np.real(lplus), axis=1)]
816
+ # lminus_min = e_data[np.argmin(np.imag(lminus), axis=1)]
817
+
818
+ p_simple = p_coef * np.imag(1 / eps) * thickness / (theta2 + theta_e ** 2) * scale
819
+ # Watch it: eps is conjugated dielectric function
820
+
821
+ return p, p * scale * 1e2, p_vol * 1e2, p_simple * 1e2 # ,lplus_min,lminus_min
822
+
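+ # Usage sketch (illustrative values; angles in rad, thickness in nm, voltage in keV):
+ # e_data = np.linspace(0.5, 40., 400)
+ # a_data = np.linspace(0., 10e-3, 200) # 0 to 10 mrad
+ # eps_data = drude(e_data, 16.8, 1.0, 0.1)
+ # p, p_scaled, p_vol, p_simple = kroeger_core(e_data, a_data, eps_data, 200., 50.)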
823
+
824
+ #################################################################
825
+ # CORE - LOSS functions
826
+ #################################################################
827
+
828
+ def get_z(z: Union[int, str]) -> int:
829
+ """Returns the atomic number independent of input as a string or number
830
+
831
+ Parameter
832
+ ---------
833
+ z: int, str
834
+ atomic number or chemical symbol
835
+ Return:
836
+ ------
837
+ z_out: int
838
+ atomic number (0 if the input was not valid)
839
+ """
840
+ x_sections = get_x_sections()
841
+
842
+ z_out = 0
843
+ if str(z).isdigit():
844
+ z_out = int(z)
845
+ elif isinstance(z, str):
846
+ for key in x_sections:
847
+ if x_sections[key]['name'].lower() == z.lower(): # Well one really should know how to write elemental
848
+ z_out = int(key)
849
+ else:
850
+ raise TypeError('A string or number is required')
851
+ return z_out
852
+
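+ # Examples: get_z('Si'), get_z('si'), get_z(14) and get_z('14') all return 14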
853
+
854
+ def get_x_sections(z: int=0) -> dict:
855
+ """Reads X-ray fluorescent cross-sections from a dictionary.
856
+
857
+ Parameters
858
+ ----------
859
+ z: int
860
+ atomic number if zero all cross-sections will be returned
861
+
862
+ Returns
863
+ -------
864
+ dictionary
865
+ cross-section of an element or of all elements if z = 0
866
+
867
+ """
868
+ if z < 1:
869
+ return x_sections
870
+ else:
871
+ z = str(z)
872
+ if z in x_sections:
873
+ return x_sections[z]
874
+ else:
875
+ return 0
876
+
877
+
878
+ def list_all_edges(z: Union[str, int] = 0, verbose: bool = False) -> typing.Tuple[str, dict]:
879
+ """List all ionization edges of an element with atomic number z
880
+
881
+ Parameters
882
+ ----------
883
+ z: int
884
+ atomic number
885
+ verbose: bool, optional
886
+ more info if set to True
887
+
888
+ Returns
889
+ -------
890
+ out_string: str
891
+ string with all major edges in energy range
892
+ """
893
+
894
+ element = str(get_z(z))
895
+ x_sections = get_x_sections()
896
+ out_string = ''
897
+ if verbose:
898
+ print('Major edges')
899
+ edge_list = {x_sections[element]['name']: {}}
900
+
901
+ for key in all_edges:
902
+ if key in x_sections[element]:
903
+ if 'onset' in x_sections[element][key]:
904
+ if verbose:
905
+ print(f" {x_sections[element]['name']}-{key}: {x_sections[element][key]['onset']:8.1f} eV ")
906
+ out_string = out_string + f" {x_sections[element]['name']}-{key}: " \
907
+ f"{x_sections[element][key]['onset']:8.1f} eV /n"
908
+ edge_list[x_sections[element]['name']][key] = x_sections[element][key]['onset']
909
+ return out_string, edge_list
910
+
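+ # Usage sketch: text, edge_list = list_all_edges('Cu')
+ # edge_list has the form {'Cu': {'L3': ..., 'L2': ..., ...}} with onset values in eV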
911
+
912
+ def find_all_edges(edge_onset: float, maximal_chemical_shift: float=5.0, major_edges_only: bool=False) -> str:
913
+ """Find all (major and minor) edges within an energy range
914
+
915
+ Parameters
916
+ ----------
917
+ edge_onset: float
918
+ approximate energy of ionization edge
919
+ maximal_chemical_shift: float, default = 5eV
920
+ range of energy window around edge_onset to look for major edges
921
+ major_edges_only: boolean, default = False
922
+ only major edges are considered if True
923
+ Returns
924
+ -------
925
+ text: str
926
+ string with all edges in energy range
927
+
928
+ """
929
+
930
+ text = ''
931
+ x_sections = get_x_sections()
932
+ for element in x_sections:
933
+ for key in x_sections[element]:
934
+ if isinstance(x_sections[element][key], dict):
935
+ if 'onset' in x_sections[element][key]:
936
+ if abs(x_sections[element][key]['onset'] - edge_onset) < maximal_chemical_shift:
937
+ # print(element, x_sections[element]['name'], key, x_sections[element][key]['onset'])
938
+ new_text = f"\n {x_sections[element]['name']:2s}-{key}: " \
939
+ f"{x_sections[element][key]['onset']:8.1f} eV "
940
+ if major_edges_only:
941
+ if key in major_edges:
942
+ text += new_text
943
+ else:
944
+ text += new_text
945
+
946
+ return text
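+ # Example: find_all_edges(532., maximal_chemical_shift=5., major_edges_only=True)
+ # returns a string that should include the O-K1 edge near 532 eV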
947
+
948
+
949
+ def find_associated_edges(dataset: sidpy.Dataset) -> None:
950
+ onsets = []
951
+ edges = []
952
+ if 'edges' in dataset.metadata:
953
+ for key, edge in dataset.metadata['edges'].items():
954
+ if key.isdigit():
955
+ element = edge['element']
956
+ pre_edge = 0. # edge['onset']-edge['start_exclude']
957
+ post_edge = edge['end_exclude'] - edge['onset']
958
+
959
+ for sym in edge['all_edges']: # TODO: Could be replaced with exclude
960
+ onsets.append(edge['all_edges'][sym]['onset'] + edge['chemical_shift']-pre_edge)
961
+ edges.append([key, f"{element}-{sym}", onsets[-1]])
962
+ for key, peak in dataset.metadata['peak_fit']['peaks'].items():
963
+ if key.isdigit():
964
+ distance = dataset.get_spectral_dims(return_axis=True)[0].values[-1]
965
+ index = -1
966
+ for ii, onset in enumerate(onsets):
967
+ if onset < peak['position'] < onset+post_edge:
968
+ if distance > np.abs(peak['position'] - onset):
969
+ distance = np.abs(peak['position'] - onset) # TODO: check whether absolute is good
970
+ distance_onset = peak['position'] - onset
971
+ index = ii
972
+ if index >= 0:
973
+ peak['associated_edge'] = edges[index][1] # check if more info is necessary
974
+ peak['distance_to_onset'] = distance_onset
975
+
976
+
977
+ def find_white_lines(dataset: sidpy.Dataset) -> None:
978
+ if 'edges' in dataset.metadata:
979
+ white_lines = {}
980
+ for index, peak in dataset.metadata['peak_fit']['peaks'].items():
981
+ if index.isdigit():
982
+ if 'associated_edge' in peak:
983
+ if peak['associated_edge'][-2:] in ['L3', 'L2', 'M5', 'M4']:
984
+ if peak['distance_to_onset'] < 10:
985
+ area = np.sqrt(2 * np.pi) * peak['amplitude'] * np.abs(peak['width']/np.sqrt(2 * np.log(2)))
986
+ if peak['associated_edge'] not in white_lines:
987
+ white_lines[peak['associated_edge']] = 0.
988
+ if area > 0:
989
+ white_lines[peak['associated_edge']] += area # TODO: only positive ones?
990
+ white_line_ratios = {}
991
+ white_line_sum = {}
992
+ for sym, area in white_lines.items():
993
+ if sym[-2:] in ['L2', 'M4', 'M2']:
994
+ if area > 0 and f"{sym[:-1]}{int(sym[-1]) + 1}" in white_lines:
995
+ if white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"] > 0:
996
+ white_line_ratios[f"{sym}/{sym[-2]}{int(sym[-1]) + 1}"] = area / white_lines[
997
+ f"{sym[:-1]}{int(sym[-1]) + 1}"]
998
+ white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] = (
999
+ area + white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"])
1000
+
1001
+ areal_density = 1.
1002
+ if 'edges' in dataset.metadata:
1003
+ for key, edge in dataset.metadata['edges'].items():
1004
+ if key.isdigit():
1005
+ if edge['element'] == sym.split('-')[0]:
1006
+ areal_density = edge['areal_density']
1007
+ break
1008
+ white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] /= areal_density
1009
+
1010
+ dataset.metadata['peak_fit']['white_lines'] = white_lines
1011
+ dataset.metadata['peak_fit']['white_line_ratios'] = white_line_ratios
1012
+ dataset.metadata['peak_fit']['white_line_sums'] = white_line_sum
1013
+
1014
+
1015
+ def second_derivative(dataset: sidpy.Dataset, sensitivity: float = 2.5) -> typing.Tuple[np.ndarray, np.ndarray]:
1016
+ """Calculates second derivative of a sidpy.dataset"""
1017
+
1018
+ dim = dataset.get_spectral_dims()
1019
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1020
+ if dataset.data_type.name == 'SPECTRAL_IMAGE':
1021
+ spectrum = dataset.view.get_spectrum()
1022
+ else:
1023
+ spectrum = np.array(dataset)
1024
+
1025
+ spec = scipy.ndimage.gaussian_filter(spectrum, 3)
1026
+
1027
+ dispersion = get_slope(energy_scale)
1028
+ second_dif = np.roll(spec, -3) - 2 * spec + np.roll(spec, +3)
1029
+ second_dif[:3] = 0
1030
+ second_dif[-3:] = 0
1031
+
1032
+ # find if there is a strong edge at high energy_scale
1033
+ noise_level = 2. * np.std(second_dif[3:50])
1034
+ [indices, _] = scipy.signal.find_peaks(second_dif, noise_level)
1035
+ width = 50 / dispersion
1036
+ if width < 50:
1037
+ width = 50
1038
+ start_end_noise = int(len(energy_scale) - width)
1039
+ for index in indices[::-1]:
1040
+ if index > start_end_noise:
1041
+ start_end_noise = index - 70
1042
+
1043
+ noise_level_start = sensitivity * np.std(second_dif[3:50])
1044
+ noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
1045
+ slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
1046
+ noise_level = noise_level_start + np.arange(len(energy_scale)) * slope
1047
+ return second_dif, noise_level
1048
+
1049
+
1050
+ def find_edges(dataset: sidpy.Dataset, sensitivity: float = 2.5) -> list:
1051
+ """find edges within a sidpy.Dataset"""
1052
+
1053
+ dim = dataset.get_spectral_dims()
1054
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1055
+
1056
+ second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
1057
+
1058
+ [indices, peaks] = scipy.signal.find_peaks(second_dif, noise_level)
1059
+
1060
+ peaks['peak_positions'] = energy_scale[indices]
1061
+ peaks['peak_indices'] = indices
1062
+ edge_energies = [energy_scale[50]]
1063
+ edge_indices = []
1064
+
1065
+ [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
1066
+ minima = energy_scale[indices]
1067
+
1068
+ for peak_number in range(len(peaks['peak_positions'])):
1069
+ position = peaks['peak_positions'][peak_number]
1070
+ if position - edge_energies[-1] > 20:
1071
+ impossible = minima[minima < position]
1072
+ impossible = impossible[impossible > position - 5]
1073
+ if len(impossible) == 0:
1074
+ possible = minima[minima > position]
1075
+ possible = possible[possible < position + 5]
1076
+ if len(possible) > 0:
1077
+ edge_energies.append((position + possible[0])/2)
1078
+ edge_indices.append(np.searchsorted(energy_scale, (position + possible[0])/2))
1079
+
1080
+ selected_edges = []
1081
+ for peak in edge_indices:
1082
+ if 525 < energy_scale[peak] < 533:
1083
+ selected_edges.append('O-K1')
1084
+ else:
1085
+ selected_edge = ''
1086
+ edges = find_all_edges(energy_scale[peak], 20, major_edges_only=True)
1087
+ edges = edges.split('\n')
1088
+ minimum_dist = 100.
1089
+ for edge in edges[1:]:
1090
+ edge = edge[:-3].split(':')
1091
+ name = edge[0].strip()
1092
+ energy = float(edge[1].strip())
1093
+ if np.abs(energy - energy_scale[peak]) < minimum_dist:
1094
+ minimum_dist = np.abs(energy - energy_scale[peak])
1095
+ selected_edge = name
1096
+
1097
+ if selected_edge != '':
1098
+ selected_edges.append(selected_edge)
1099
+
1100
+ return selected_edges
1101
+
1102
+
1103
+ def assign_likely_edges(edge_channels: Union[list, np.ndarray], energy_scale: np.ndarray):
1104
+ edges_in_list = []
1105
+ result = {}
1106
+ for channel in edge_channels:
1107
+ if channel not in edge_channels[edges_in_list]:
1108
+ shift = 5
1109
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
1110
+ while len(element_list) < 1:
1111
+ shift += 1
1112
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
1113
+
1114
+ if len(element_list) > 1:
1115
+ while len(element_list) > 0:
1116
+ shift-=1
1117
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift, major_edges_only=True)
1118
+ element_list = find_all_edges(energy_scale[channel], maximal_chemical_shift=shift+1, major_edges_only=True)
1119
+ element = (element_list[:4]).strip()
1120
+ z = get_z(element)
1121
+ result[element] =[]
1122
+ _, edge_list = list_all_edges(z)
1123
+
1124
+ for peak in edge_list:
1125
+ for edge in edge_list[peak]:
1126
+ possible_minor_edge = np.argmin(np.abs(energy_scale[edge_channels]-edge_list[peak][edge]))
1127
+ if np.abs(energy_scale[edge_channels[possible_minor_edge]]-edge_list[peak][edge]) < 3:
1128
+ #print('nex', next_e)
1129
+ edges_in_list.append(possible_minor_edge)
1130
+
1131
+ result[element].append(edge)
1132
+
1133
+ return result
1134
+
1135
+
1136
+ def auto_id_edges(dataset):
1137
+ edge_channels = identify_edges(dataset)
1138
+ dim = dataset.get_spectral_dims()
1139
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1140
+ found_edges = assign_likely_edges(edge_channels, energy_scale)
1141
+ return found_edges
1142
+
1143
+
1144
+ def identify_edges(dataset: sidpy.Dataset, noise_level: float=2.0):
1145
+ """
1146
+ Using first derivative to determine edge onsets
1147
+ Any peak in first derivative higher than noise_level times standard deviation will be considered
1148
+
1149
+ Parameters
1150
+ ----------
1151
+ dataset: sidpy.Dataset
1152
+ the spectrum
1153
+ noise_level: float
1154
+ this number times the standard deviation of the first derivative decides whether an edge onset is significant
1155
+
1156
+ Return
1157
+ ------
1158
+ edge_channel: numpy.ndarray
1159
+
1160
+ """
1161
+ dim = dataset.get_spectral_dims()
1162
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1163
+ dispersion = get_slope(energy_scale)
1164
+ spec = scipy.ndimage.gaussian_filter(dataset, 3/dispersion) # smooth with a 3 eV wide Gaussian
1165
+
1166
+ first_derivative = spec - np.roll(spec, +2)
1167
+ first_derivative[:3] = 0
1168
+ first_derivative[-3:] = 0
1169
+
1170
+ # find if there is a strong edge at high energy_scale
1171
+ noise_level = noise_level*np.std(first_derivative[3:50])
1172
+ [edge_channels, _] = scipy.signal.find_peaks(first_derivative, noise_level)
1173
+
1174
+ return edge_channels
1175
+
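+ # Usage sketch ('core_loss' is a hypothetical core-loss sidpy.Dataset):
+ # edge_channels = identify_edges(core_loss, noise_level=2.0)
+ # energy_scale[edge_channels] gives the candidate onset energies in eV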
1176
+
1177
+ def add_element_to_dataset(dataset: sidpy.Dataset, z: Union[int, str]):
1178
+ """
1179
+ """
1180
+ # We check whether this element is already in the edges dictionary
1181
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1182
+
1183
+ zz = get_z(z)
1184
+ if 'edges' not in dataset.metadata:
1185
+ dataset.metadata['edges'] = {'model': {}, 'use_low_loss': False}
1186
+ index = 0
1187
+ for key, edge in dataset.metadata['edges'].items():
1188
+ if key.isdigit():
1189
+ index += 1
1190
+ if 'z' in edge:
1191
+ if zz == edge['z']:
1192
+ index = int(key)
1193
+ break
1194
+
1195
+ major_edge = ''
1196
+ minor_edge = ''
1197
+ all_edges = {}
1198
+ x_section = get_x_sections(zz)
1199
+ edge_start = 10 # int(15./ft.get_slope(self.energy_scale)+0.5)
1200
+ for key in x_section:
1201
+ if len(key) == 2 and key[0] in ['K', 'L', 'M', 'N', 'O'] and key[1].isdigit():
1202
+ if energy_scale[edge_start] < x_section[key]['onset'] < energy_scale[-edge_start]:
1203
+ if key in ['K1', 'L3', 'M5', 'M3']:
1204
+ major_edge = key
1205
+
1206
+ all_edges[key] = {'onset': x_section[key]['onset']}
1207
+
1208
+ if major_edge != '':
1209
+ key = major_edge
1210
+ elif minor_edge != '':
1211
+ key = minor_edge
1212
+ else:
1213
+ print(f'Could not find an edge of {zz} in the spectrum')
1214
+ return False
1215
+
1216
+
1217
+ if str(index) not in dataset.metadata['edges']:
1218
+ dataset.metadata['edges'][str(index)] = {}
1219
+
1220
+ start_exclude = x_section[key]['onset'] - x_section[key]['excl before']
1221
+ end_exclude = x_section[key]['onset'] + x_section[key]['excl after']
1222
+
1223
+ dataset.metadata['edges'][str(index)] = {'z': zz, 'symmetry': key, 'element': elements[zz],
1224
+ 'onset': x_section[key]['onset'], 'end_exclude': end_exclude,
1225
+ 'start_exclude': start_exclude}
1226
+ dataset.metadata['edges'][str(index)]['all_edges'] = all_edges
1227
+ dataset.metadata['edges'][str(index)]['chemical_shift'] = 0.0
1228
+ dataset.metadata['edges'][str(index)]['areal_density'] = 0.0
1229
+ dataset.metadata['edges'][str(index)]['original_onset'] = dataset.metadata['edges'][str(index)]['onset']
1230
+ return True
1231
+
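Continuing the sketch above, elements may be given as a symbol or an atomic number; the function returns `False` when no usable edge of that element falls inside the spectrum's energy range:

```python
# register silicon (by symbol) and nitrogen (by atomic number)
for element in ['Si', 7]:
    if not eels.add_element_to_dataset(dataset, element):
        print(f'no usable edge of {element} in this energy range')

print(list(dataset.metadata['edges'].keys()))
```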
1232
+
1233
+ def make_edges(edges_present: list, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray=None) -> dict:
1234
+ """Makes the edges dictionary for quantification
1235
+
1236
+ Parameters
1237
+ ----------
1238
+ edges_present: list
1239
+ list of edges as 'element-symmetry' strings, e.g. ['B-K1', 'N-K1']
1240
+ energy_scale: numpy array
1241
+ energy scale on which to make cross-section
1242
+ e_0: float
1243
+ acceleration voltage (in V)
1244
+ coll_angle: float
1245
+ collection angle in mrad
1246
+ low_loss: numpy array with same length as energy_scale
1247
+ low-loss spectrum with which to convolve the cross-section (default=None)
1248
+
1249
+ Returns
1250
+ -------
1251
+ edges: dict
1252
+ dictionary with all information on cross-section
1253
+ """
1254
+ x_sections = get_x_sections()
1255
+ edges = {}
1256
+ for i, edge in enumerate(edges_present):
1257
+ element, symmetry = edge.split('-')
1258
+ z = 0
1259
+ for key in x_sections:
1260
+ if element == x_sections[key]['name']:
1261
+ z = int(key)
1262
+ edges[i] = {}
1263
+ edges[i]['z'] = z
1264
+ edges[i]['symmetry'] = symmetry
1265
+ edges[i]['element'] = element
1266
+
1267
+ for key in edges:
1268
+ xsec = x_sections[str(edges[key]['z'])]
1269
+ if 'chemical_shift' not in edges[key]:
1270
+ edges[key]['chemical_shift'] = 0
1271
+ if 'symmetry' not in edges[key]:
1272
+ edges[key]['symmetry'] = 'K1'
1273
+ if 'K' in edges[key]['symmetry']:
1274
+ edges[key]['symmetry'] = 'K1'
1275
+ elif 'L' in edges[key]['symmetry']:
1276
+ edges[key]['symmetry'] = 'L3'
1277
+ elif 'M' in edges[key]['symmetry']:
1278
+ edges[key]['symmetry'] = 'M5'
1279
+ else:
1280
+ edges[key]['symmetry'] = edges[key]['symmetry'][0:2]
1281
+
1282
+ edges[key]['original_onset'] = xsec[edges[key]['symmetry']]['onset']
1283
+ edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
1284
+ edges[key]['start_exclude'] = edges[key]['onset'] - xsec[edges[key]['symmetry']]['excl before']
1285
+ edges[key]['end_exclude'] = edges[key]['onset'] + xsec[edges[key]['symmetry']]['excl after']
1286
+
1287
+ edges = make_cross_sections(edges, energy_scale, e_0, coll_angle, low_loss)
1288
+
1289
+ return edges
1290
+
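A sketch of calling `make_edges` directly, with illustrative values: the edge list uses 'element-symmetry' strings, the acceleration voltage is given in volts and the collection angle in mrad:

```python
import numpy as np

energy_scale = np.arange(150., 600., 0.5)             # eV
edges = eels.make_edges(['B-K1', 'N-K1'], energy_scale,
                        e_0=200000., coll_angle=30.)  # 200 kV, 30 mrad
print(edges[0]['element'], edges[0]['onset'], edges[0]['X_section_type'])
```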
1291
+ def fit_dataset(dataset: sidpy.Dataset):
+ """Fit the registered edges and a power-law background, then print the relative composition"""
1292
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1293
+ if 'fit_area' not in dataset.metadata['edges']:
1294
+ dataset.metadata['edges']['fit_area'] = {}
1295
+ if 'fit_start' not in dataset.metadata['edges']['fit_area']:
1296
+ dataset.metadata['edges']['fit_area']['fit_start'] = energy_scale[50]
1297
+ if 'fit_end' not in dataset.metadata['edges']['fit_area']:
1298
+ dataset.metadata['edges']['fit_area']['fit_end'] = energy_scale[-2]
1299
+ dataset.metadata['edges']['use_low_loss'] = False
1300
+
1301
+ if 'experiment' in dataset.metadata:
1302
+ exp = dataset.metadata['experiment']
1303
+ if 'convergence_angle' not in exp:
1304
+ raise ValueError('need a convergence_angle in the experiment metadata dictionary')
1305
+ alpha = exp['convergence_angle']
1306
+ beta = exp['collection_angle']
1307
+ beam_kv = exp['acceleration_voltage']
1308
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0]
1309
+ eff_beta = effective_collection_angle(energy_scale, alpha, beta, beam_kv)
1310
+ edges = make_cross_sections(dataset.metadata['edges'], np.array(energy_scale), beam_kv, eff_beta)
1311
+ dataset.metadata['edges'] = fit_edges2(dataset, energy_scale, edges)
1312
+ areal_density = []
1313
+ elements = []
1314
+ for key in edges:
1315
+ if key.isdigit(): # only edges have numbers in that dictionary
1316
+ elements.append(edges[key]['element'])
1317
+ areal_density.append(edges[key]['areal_density'])
1318
+ areal_density = np.array(areal_density)
1319
+ out_string = '\nRelative composition: \n'
1320
+ for i, element in enumerate(elements):
1321
+ out_string += f'{element}: {areal_density[i] / areal_density.sum() * 100:.1f}% '
1322
+
1323
+ print(out_string)
1324
+
1325
+
1326
+ def auto_chemical_composition(dataset: sidpy.Dataset) -> None:
1327
+ """Automatic workflow: identify edges, add the elements, and fit the dataset"""
1328
+ found_edges = auto_id_edges(dataset)
1329
+ for key in found_edges:
1330
+ add_element_to_dataset(dataset, key)
1331
+ fit_dataset(dataset)
1332
+
1333
+
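`fit_dataset` expects convergence angle, collection angle and acceleration voltage in `dataset.metadata['experiment']`, so a fully automatic quantification looks roughly like this (the angles and voltage below are placeholders for the actual experimental values):

```python
dataset.metadata['experiment'] = {'convergence_angle': 10.,        # mrad
                                  'collection_angle': 33.,         # mrad
                                  'acceleration_voltage': 200000.} # V
eels.auto_chemical_composition(dataset)   # prints the relative composition
```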
1334
+ def make_cross_sections(edges: dict, energy_scale: np.ndarray, e_0: float, coll_angle: float, low_loss: np.ndarray=None) -> dict:
1335
+ """Updates the edges dictionary with collection angle-integrated X-ray photo-absorption cross-sections
1336
+
1337
+ """
1338
+ for key in edges:
1339
+ if str(key).isdigit():
1340
+ edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
1341
+ edges[key]['chemical_shift']) / 1e10 # from barns to nm^2
1342
+ if low_loss is not None:
1343
+ low_loss = np.roll(np.array(low_loss), 1024 - np.argmax(np.array(low_loss)))
1344
+ edges[key]['data'] = scipy.signal.convolve(edges[key]['data'], low_loss/low_loss.sum(), mode='same')
1345
+
1346
+ edges[key]['onset'] = edges[key]['original_onset'] + edges[key]['chemical_shift']
1347
+ edges[key]['X_section_type'] = 'XRPA'
1348
+ edges[key]['X_section_source'] = 'pyTEMlib'
1349
+
1350
+ return edges
1351
+
1352
+
1353
+ def power_law(energy: np.ndarray, a: float, r: float) -> np.ndarray:
1354
+ """power law for power_law_background"""
1355
+ return a * np.power(energy, -r)
1356
+
1357
+
1358
+ def power_law_background(spectrum: np.ndarray, energy_scale: np.ndarray, fit_area: list, verbose: bool=False):
1359
+ """fit of power law to spectrum """
1360
+
1361
+ # Determine energy window for background fit in pixels
1362
+ startx = np.searchsorted(energy_scale, fit_area[0])
1363
+ endx = np.searchsorted(energy_scale, fit_area[1])
1364
+
1365
+ x = np.array(energy_scale)[startx:endx]
1366
+ y = np.array(spectrum)[startx:endx].flatten()
1367
+
1368
+ # Initial values of parameters
1369
+ p0 = np.array([1.0E+20, 3])
1370
+
1371
+ # background fitting
1372
+ def bgdfit(pp, yy, xx):
1373
+ err = yy - power_law(xx, pp[0], pp[1])
1374
+ return err
1375
+
1376
+ [p, _] = leastsq(bgdfit, p0, args=(y, x), maxfev=2000)
1377
+
1378
+ background_difference = y - power_law(x, p[0], p[1])
1379
+ background_noise_level = std_dev = np.std(background_difference)
1380
+ if verbose:
1381
+ print(f'Power-law background with amplitude A: {p[0]:.1f} and exponent -r: {p[1]:.2f}')
1382
+ print(background_difference.max() / background_noise_level)
1383
+
1384
+ print(f'Noise level in spectrum {std_dev:.3f} counts')
1385
+
1386
+ # Calculate background over the whole energy scale
1387
+ background = power_law(energy_scale, p[0], p[1])
1388
+ return background, p
1389
+
1390
+
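A self-contained sketch of the background fit on a synthetic power-law spectrum; the fit window [300, 420] eV stands in for a pre-edge region:

```python
import numpy as np

energy_scale = np.arange(200., 800., 0.5)
rng = np.random.default_rng(42)
spectrum = 1e9 * np.power(energy_scale, -3.) + rng.normal(0., 0.1, len(energy_scale))

background, [a, r] = eels.power_law_background(spectrum, energy_scale,
                                               fit_area=[300., 420.], verbose=True)
signal = spectrum - background   # background-subtracted spectrum
```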
1391
+ def cl_model(x, p, number_of_edges, xsec):
1392
+ """ core loss model for fitting"""
1393
+ y = (p[9] * np.power(x, (-p[10]))) + p[7] * x + p[8] * x * x
1394
+ for i in range(number_of_edges):
1395
+ y = y + p[i] * xsec[i, :]
1396
+ return y
1397
+
1398
+
1399
+ def fit_edges2(spectrum, energy_scale, edges):
1400
+ """fit edges for quantification"""
1401
+
1402
+ dispersion = energy_scale[1] - energy_scale[0]
1403
+ # Determine fitting ranges and masks to exclude ranges
1404
+ mask = np.ones(len(spectrum))
1405
+
1406
+ background_fit_start = edges['fit_area']['fit_start']
1407
+ if edges['fit_area']['fit_end'] > energy_scale[-1]:
1408
+ edges['fit_area']['fit_end'] = energy_scale[-1]
1409
+ background_fit_end = edges['fit_area']['fit_end']
1410
+
1411
+ startx = np.searchsorted(energy_scale, background_fit_start)
1412
+ endx = np.searchsorted(energy_scale, background_fit_end)
1413
+ mask[0:startx] = 0.0
1414
+ mask[endx:-1] = 0.0
1415
+ for key in edges:
1416
+ if key.isdigit():
1417
+ if edges[key]['start_exclude'] > background_fit_start + dispersion:
1418
+ if edges[key]['start_exclude'] < background_fit_end - dispersion * 2:
1419
+ if edges[key]['end_exclude'] > background_fit_end - dispersion:
1420
+ # we need at least one channel to fit.
1421
+ edges[key]['end_exclude'] = background_fit_end - dispersion
1422
+ startx = np.searchsorted(energy_scale, edges[key]['start_exclude'])
1423
+ if startx < 2:
1424
+ startx = 1
1425
+ endx = np.searchsorted(energy_scale, edges[key]['end_exclude'])
1426
+ mask[startx: endx] = 0.0
1427
+
1428
+ ########################
1429
+ # Background Fit
1430
+ ########################
1431
+ bgd_fit_area = [background_fit_start, background_fit_end]
1432
+ background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
1433
+
1434
+ #######################
1435
+ # Edge Fit
1436
+ #######################
1437
+ x = energy_scale
1438
+ blurred = gaussian_filter(spectrum, sigma=5)
1439
+
1440
+ y = blurred # fit to the smoothed spectrum
1441
+ y[np.where(y < 1e-8)] = 1e-8
1442
+
1443
+ xsec = []
1444
+ number_of_edges = 0
1445
+ for key in edges:
1446
+ if key.isdigit():
1447
+ xsec.append(edges[key]['data'])
1448
+ number_of_edges += 1
1449
+ xsec = np.array(xsec)
1450
+
1451
+
1452
+ def model(xx, pp):
1453
+ yy = pp[0] * xx**pp[1] + pp[2] + pp[3]* xx + pp[4] * xx * xx
1454
+ for i in range(number_of_edges):
1455
+ pp[i+5] = np.abs(pp[i+5])
1456
+ yy = yy + pp[i+5] * xsec[i, :]
1457
+ return yy
1458
+
1459
+ def residuals(pp, xx, yy):
1460
+ err = np.abs((yy - model(xx, pp)) * mask) / np.sqrt(np.abs(yy))
1461
+ return err
1462
+
1463
+ scale = y[100]
1464
+ pin = np.array([A, -r, 10., 1., 0.00] + [scale/5] * number_of_edges)
1465
+ [p, _] = leastsq(residuals, pin, args=(x, y))
1466
+
1467
+ for key in edges:
1468
+ if key.isdigit():
1469
+ edges[key]['areal_density'] = p[int(key)+5]
1470
+ print(p)
1471
+ edges['model'] = {}
1472
+ edges['model']['background'] = (p[0] * np.power(x, p[1]) + p[2] + p[3] * x + p[4] * x * x)
1473
+ edges['model']['background-poly_0'] = p[2]
1474
+ edges['model']['background-poly_1'] = p[3]
1475
+ edges['model']['background-poly_2'] = p[4]
1476
+ edges['model']['background-A'] = p[0]
1477
+ edges['model']['background-r'] = p[1]
1478
+ edges['model']['spectrum'] = model(x, p)
1479
+ edges['model']['blurred'] = blurred
1480
+ edges['model']['mask'] = mask
1481
+ edges['model']['fit_parameter'] = p
1482
+ edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
1483
+ edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
1484
+
1485
+ return edges
1486
+
1487
+
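After a quantification with `fit_dataset` above, the `'model'` entry filled in by `fit_edges2` can be plotted directly; a sketch assuming matplotlib:

```python
import numpy as np
import matplotlib.pyplot as plt

energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
model = dataset.metadata['edges']['model']

plt.plot(energy_scale, np.array(dataset), label='spectrum')
plt.plot(energy_scale, model['spectrum'], label='fit')
plt.plot(energy_scale, model['background'], label='background')
plt.legend()
plt.show()
```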
1488
+ def fit_edges(spectrum, energy_scale, region_tags, edges):
1489
+ """fit edges for quantification"""
1490
+
1491
+ # Determine fitting ranges and masks to exclude ranges
1492
+ mask = np.ones(len(spectrum))
1493
+
1494
+ background_fit_end = energy_scale[-1]
1495
+ for key in region_tags:
1496
+ end = region_tags[key]['start_x'] + region_tags[key]['width_x']
1497
+
1498
+ startx = np.searchsorted(energy_scale, region_tags[key]['start_x'])
1499
+ endx = np.searchsorted(energy_scale, end)
1500
+
1501
+ if key == 'fit_area':
1502
+ mask[0:startx] = 0.0
1503
+ mask[endx:-1] = 0.0
1504
+ else:
1505
+ mask[startx:endx] = 0.0
1506
+ if region_tags[key]['start_x'] < background_fit_end: # background fit ends at the onset of the first edge
1507
+ background_fit_end = region_tags[key]['start_x']
1508
+
1509
+ ########################
1510
+ # Background Fit
1511
+ ########################
1512
+ bgd_fit_area = [region_tags['fit_area']['start_x'], background_fit_end]
1513
+ background, [A, r] = power_law_background(spectrum, energy_scale, bgd_fit_area, verbose=False)
1514
+
1515
+ #######################
1516
+ # Edge Fit
1517
+ #######################
1518
+ x = energy_scale
1519
+ blurred = gaussian_filter(spectrum, sigma=5)
1520
+
1521
+ y = blurred # fit to the smoothed spectrum
1522
+ y[np.where(y < 1e-8)] = 1e-8
1523
+
1524
+ xsec = []
1525
+ number_of_edges = 0
1526
+ for key in edges:
1527
+ if key.isdigit():
1528
+ xsec.append(edges[key]['data'])
1529
+ number_of_edges += 1
1530
+ xsec = np.array(xsec)
1531
+
1532
+ def model(xx, pp):
1533
+ yy = background + pp[6] + pp[7] * xx + pp[8] * xx * xx
1534
+ for i in range(number_of_edges):
1535
+ pp[i] = np.abs(pp[i])
1536
+ yy = yy + pp[i] * xsec[i, :]
1537
+ return yy
1538
+
1539
+ def residuals(pp, xx, yy):
1540
+ err = np.abs((yy - model(xx, pp)) * mask) # / np.sqrt(np.abs(y))
1541
+ return err
1542
+
1543
+ scale = y[100]
1544
+ pin = np.array([scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, scale / 5, -scale / 10, 1.0, 0.001])
1545
+ [p, _] = leastsq(residuals, pin, args=(x, y))
1546
+
1547
+ for key in edges:
1548
+ if key.isdigit():
1549
+ edges[key]['areal_density'] = p[int(key) - 1]
1550
+
1551
+ edges['model'] = {}
1552
+ edges['model']['background'] = (background + p[6] + p[7] * x + p[8] * x * x)
1553
+ edges['model']['background-poly_0'] = p[6]
1554
+ edges['model']['background-poly_1'] = p[7]
1555
+ edges['model']['background-poly_2'] = p[8]
1556
+ edges['model']['background-A'] = A
1557
+ edges['model']['background-r'] = r
1558
+ edges['model']['spectrum'] = model(x, p)
1559
+ edges['model']['blurred'] = blurred
1560
+ edges['model']['mask'] = mask
1561
+ edges['model']['fit_parameter'] = p
1562
+ edges['model']['fit_area_start'] = region_tags['fit_area']['start_x']
1563
+ edges['model']['fit_area_end'] = region_tags['fit_area']['start_x'] + region_tags['fit_area']['width_x']
1564
+
1565
+ return edges
1566
+
1567
+
1568
+
1569
+ def get_spectrum(dataset, x=0, y=0, bin_x=1, bin_y=1):
1570
+ """
1571
+ Parameters
1572
+ ----------
1573
+ dataset: sidpy.Dataset object
1574
+ contains spectrum or spectrum image
1575
+ x: int default = 0
1576
+ x position of spectrum image
1577
+ y: int default = 0
1578
+ y position of spectrum
1579
+ bin_x: int default = 1
1580
+ binning of spectrum image in x-direction
1581
+ bin_y: int default = 1
1582
+ binning of spectrum image in y-direction
1583
+
1584
+ Returns
1585
+ -------
1586
+ spectrum: sidpy.Dataset object
1587
+
1588
+ """
1589
+ if dataset.data_type.name == 'SPECTRUM':
1590
+ spectrum = dataset.copy()
1591
+ else:
1592
+ image_dims = dataset.get_image_dims()
1593
+ if x > dataset.shape[image_dims[0]] - bin_x:
1594
+ x = dataset.shape[image_dims[0]] - bin_x
1595
+ if y > dataset.shape[image_dims[1]] - bin_y:
1596
+ y = dataset.shape[image_dims[1]] - bin_y
1597
+ selection = []
1598
+ dimensions = dataset.get_dimension_types()
1599
+ for dim, dimension_type in enumerate(dimensions):
1600
+ # print(dim, axis.dimension_type)
1601
+ if dimension_type == 'SPATIAL':
1602
+ if dim == image_dims[0]:
1603
+ selection.append(slice(x, x + bin_x))
1604
+ else:
1605
+ selection.append(slice(y, y + bin_y))
1606
+ elif dimension_type == 'SPECTRAL':
1607
+ selection.append(slice(None))
1608
+ elif dimension_type == 'CHANNEL':
1609
+ selection.append(slice(None))
1610
+ else:
1611
+ selection.append(slice(0, 1))
1612
+
1613
+ spectrum = dataset[tuple(selection)].mean(axis=tuple(image_dims))
1614
+ spectrum.squeeze().compute()
1615
+ spectrum.data_type = 'Spectrum'
1616
+ return spectrum
1617
+
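For a spectrum image, `get_spectrum` averages a binned pixel region; a sketch assuming `si_dataset` is a hypothetical sidpy spectral image and matplotlib is imported:

```python
# average the 2x2 pixel region starting at (x=4, y=5)
spectrum = eels.get_spectrum(si_dataset, x=4, y=5, bin_x=2, bin_y=2)
plt.plot(spectrum.get_spectral_dims(return_axis=True)[0].values, spectrum)
```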
1618
+ def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
1619
+ """find peaks in spectrum"""
1620
+
1621
+ if dataset.data_type.name == 'SPECTRAL_IMAGE':
1622
+ spectrum = dataset.view.get_spectrum()
1623
+ else:
1624
+ spectrum = np.array(dataset)
1625
+
1626
+ energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
1627
+
1628
+ second_dif, noise_level = second_derivative(dataset, sensitivity=sensitivity)
1629
+ [indices, _] = scipy.signal.find_peaks(-second_dif, noise_level)
1630
+
1631
+ start_channel = np.searchsorted(energy_scale, fit_start)
1632
+ end_channel = np.searchsorted(energy_scale, fit_end)
1633
+ peaks = []
1634
+ for index in indices:
1635
+ if start_channel < index < end_channel:
1636
+ peaks.append(index - start_channel)
1637
+
1638
+ if 'model' in dataset.metadata:
1639
+ model = dataset.metadata['model'][start_channel:end_channel]
1640
+
1641
+ elif energy_scale[0] > 0:
1642
+ if 'edges' not in dataset.metadata:
1643
+ return
1644
+ if 'model' not in dataset.metadata['edges']:
1645
+ return
1646
+ model = dataset.metadata['edges']['model']['spectrum'][start_channel:end_channel]
1647
+
1648
+ else:
1649
+ model = np.zeros(end_channel - start_channel)
1650
+
1651
+ energy_scale = energy_scale[start_channel:end_channel]
1652
+
1653
+ difference = np.array(spectrum)[start_channel:end_channel] - model
1654
+ fit = np.zeros(len(energy_scale))
1655
+ p_out = []
1656
+ if len(peaks) > 0:
1657
+ p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
1658
+ [p_out, _] = scipy.optimize.leastsq(residuals_smooth, p_in, ftol=1e-3, args=(energy_scale,
1659
+ difference,
1660
+ False))
1661
+ fit = fit + model_smooth(energy_scale, p_out, False)
1662
+
1663
+ peak_model = np.zeros(len(spectrum))
1664
+ peak_model[start_channel:end_channel] = fit
1665
+
1666
+ return peak_model, p_out
1667
+
1668
+
1669
+ def find_maxima(y, number_of_peaks):
1670
+ """ find the first most prominent peaks
1671
+
1672
+ peaks are then sorted by energy
1673
+
1674
+ Parameters
1675
+ ----------
1676
+ y: numpy array
1677
+ (part) of spectrum
1678
+ number_of_peaks: int
1679
+
1680
+ Returns
1681
+ -------
1682
+ numpy array
1683
+ indices of peaks
1684
+ """
1685
+ blurred2 = gaussian_filter(y, sigma=2)
1686
+ peaks, _ = scipy.signal.find_peaks(blurred2)
1687
+ prominences = peak_prominences(blurred2, peaks)[0]
1688
+ prominences_sorted = np.argsort(prominences)
1689
+ peaks = peaks[prominences_sorted[-number_of_peaks:]]
1690
+
1691
+ peak_indices = np.argsort(peaks)
1692
+ return peaks[peak_indices]
1693
+
1694
+
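A quick check of `find_maxima` on a synthetic two-peak spectrum; the returned channel indices are sorted by peak position:

```python
import numpy as np

x = np.linspace(0., 60., 1024)
y = np.exp(-(x - 16.8)**2 / 4.) + 0.4 * np.exp(-(x - 33.6)**2 / 8.) + 0.01

peak_indices = eels.find_maxima(y, number_of_peaks=2)
print(x[peak_indices])   # approximately [16.8, 33.6]
```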
1695
+ #
1696
+ def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
1697
+ """ model for fitting low-loss spectrum"""
1698
+ if pin is None:
1699
+ pin = p
1700
+
1701
+ # if len([restrict_pos]) == 1:
1702
+ # restrict_pos = [restrict_pos]*number_of_peaks
1703
+ # if len([restrict_width]) == 1:
1704
+ # restrict_width = [restrict_width]*number_of_peaks
1705
+ y = np.zeros(len(x))
1706
+
1707
+ for i in range(number_of_peaks):
1708
+ index = int(i * 3)
1709
+ if restrict_pos > 0:
1710
+ if p[index] > pin[index] * (1.0 + restrict_pos):
1711
+ p[index] = pin[index] * (1.0 + restrict_pos)
1712
+ if p[index] < pin[index] * (1.0 - restrict_pos):
1713
+ p[index] = pin[index] * (1.0 - restrict_pos)
1714
+
1715
+ p[index + 1] = abs(p[index + 1])
1716
+ # print(p[index + 1])
1717
+ p[index + 2] = abs(p[index + 2])
1718
+ if restrict_width > 0:
1719
+ if p[index + 2] > pin[index + 2] * (1.0 + restrict_width):
1720
+ p[index + 2] = pin[index + 2] * (1.0 + restrict_width)
1721
+
1722
+ if peak_shape[i] == 'Lorentzian':
1723
+ y = y + lorentz(x, p[index:])
1724
+ elif peak_shape[i] == 'zl':
1725
+
1726
+ y = y + zl(x, p[index:], p_zl)
1727
+ else:
1728
+ y = y + gauss(x, p[index:])
1729
+ return y
1730
+
1731
+
1732
+ def sort_peaks(p, peak_shape):
1733
+ """sort fitting parameters by peak position"""
1734
+ number_of_peaks = int(len(p) / 3)
1735
+ p3 = np.reshape(p, (number_of_peaks, 3))
1736
+ sort_pin = np.argsort(p3[:, 0])
1737
+
1738
+ p = p3[sort_pin].flatten()
1739
+ peak_shape = np.array(peak_shape)[sort_pin].tolist()
1740
+
1741
+ return p, peak_shape
1742
+
1743
+
1744
+ def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
1745
+ """ add peaks to fitting parameters"""
1746
+ if pin_in is None:
1747
+ return
1748
+ if peak_shape_in is None:
1749
+ return
1750
+
1751
+ pin = pin_in.copy()
1752
+
1753
+ peak_shape = peak_shape_in.copy()
1754
+ if isinstance(shape, str): # if shape is only a string, make a list of it
1755
+ shape = [shape]
1756
+
1757
+ if len(shape) == 1:
1758
+ shape = shape * len(peaks)
1759
+ for i, peak in enumerate(peaks):
1760
+ pin.append(x[peak])
1761
+ pin.append(y[peak])
1762
+ pin.append(.3)
1763
+ peak_shape.append(shape[i])
1764
+
1765
+ return pin, peak_shape
1766
+
1767
+
1768
+ def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
1769
+ """model for fitting low-loss spectrum"""
1770
+
1771
+ pin_original = pin.copy()
1772
+
1773
+ def residuals3(pp, xx, yy):
1774
+ err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
1775
+ restrict_width)) / np.sqrt(np.abs(yy))
1776
+ return err
1777
+
1778
+ [p, _] = leastsq(residuals3, pin, args=(x, y))
1779
+ # p2 = p.tolist()
1780
+ # p3 = np.reshape(p2, (number_of_peaks, 3))
1781
+ # sort_pin = np.argsort(p3[:, 0])
1782
+
1783
+ # p = p3[sort_pin].flatten()
1784
+ # peak_shape = np.array(peak_shape)[sort_pin].tolist()
1785
+
1786
+ return p, peak_shape
1787
+
1788
+
1789
+
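A sketch tying the low-loss helpers together, reusing `x` and `y` from the `find_maxima` example above; `p_zl` stays empty because no 'zl' peak shape is used, and peak positions/widths are restricted to within 10%/50% of their starting values:

```python
maxima = eels.find_maxima(y, 2)                 # starting channel indices
p_in, shapes = eels.add_peaks(x, y, maxima, pin_in=[], peak_shape_in=[],
                              shape='Gaussian')
p_out, shapes = eels.fit_model(x, y, p_in, number_of_peaks=2,
                               peak_shape=shapes, p_zl=[],
                               restrict_pos=0.1, restrict_width=0.5)
print(np.reshape(p_out, (2, 3)))   # one (position, amplitude, width) row per peak
```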
1790
+ def plot_dispersion(plotdata, units, a_data, e_data, title, max_p, ee, ef=4., ep=16.8, es=0, ibt=[]):
1791
+ """Plot loss function """
1792
+
1793
+ [x, y] = np.meshgrid(e_data + 1e-12, a_data[1024:2048] * 1000)
1794
+
1795
+ z = plotdata
1796
+ lev = np.array([0.01, 0.05, 0.1, 0.25, 0.5, 1, 2, 3, 4, 4.9]) * max_p / 5
1797
+
1798
+ wavelength = get_wave_length(ee)
1799
+ q = a_data[1024:2048] / (wavelength * 1e9) # in [1/nm]
1800
+ scale = np.array([0, a_data[-1], e_data[0], e_data[-1]])
1801
+ ev2hertz = constants.value('electron volt-hertz relationship')
1802
+
1803
+ if units[0] == 'mrad':
1804
+ units[0] = 'scattering angle [mrad]'
1805
+ scale[1] = scale[1] * 1000.
1806
+ light_line = constants.c * a_data # for mrad
1807
+ elif units[0] == '1/nm':
1808
+ units[0] = 'scattering vector [1/nm]'
1809
+ scale[1] = scale[1] / (wavelength * 1e9)
1810
+ light_line = 1 / (constants.c / ev2hertz) * 1e-9
1811
+
1812
+ if units[1] == 'eV':
1813
+ units[1] = 'energy loss [eV]'
1814
+
1815
+ if units[2] == 'ppm':
1816
+ units[2] = 'probability [ppm]'
1817
+ if units[2] == '1/eV':
1818
+ units[2] = 'probability [eV$^{-1}$ srad$^{-1}$]'
1819
+
1820
+ alpha = 3. / 5. * ef / ep
1821
+
1822
+ ax2 = plt.gca()
1823
+ fig2 = plt.gcf()
1824
+ im = ax2.imshow(z.T, clim=(0, max_p), origin='lower', aspect='auto', extent=scale)
1825
+ co = ax2.contour(y, x, z, levels=lev, colors='k', origin='lower')
1826
+ # ,extent=(-ang*1000.,ang*1000.,e_data[0],e_data[-1]))#, vmin = p_vol.min(), vmax = 1000)
1827
+
1828
+ fig2.colorbar(im, ax=ax2, label=units[2])
1829
+
1830
+ ax2.plot(a_data, light_line, c='r', label='light line')
1831
+ # ax2.plot(e_data*light_line*np.sqrt(np.real(eps_data)),e_data, color='steelblue',
1832
+ # label='$\omega = c q \sqrt{\epsilon_2}$')
1833
+
1834
+ # ax2.plot(q, Ep_disp, c='r')
1835
+ ax2.plot([11.5 * light_line, 0.12], [11.5, 11.5], c='r')
1836
+
1837
+ ax2.text(.05, 11.7, 'surface plasmon', color='r')
1838
+ ax2.plot([0.0, 0.12], [16.8, 16.8], c='r')
1839
+ ax2.text(.05, 17, 'volume plasmon', color='r')
1840
+ ax2.set_xlim(0, scale[1])
1841
+ ax2.set_ylim(0, 20)
1842
+ # Interband transitions
1843
+ ax2.plot([0.0, 0.25], [4.2, 4.2], c='g', label='interband transitions')
1844
+ ax2.plot([0.0, 0.25], [5.2, 5.2], c='g')
1845
+ ax2.set_ylabel(units[1])
1846
+ ax2.set_xlabel(units[0])
1847
+ ax2.legend(loc='lower right')
1848
+
1849
+
1850
+ def xsec_xrpa(energy_scale, e0, z, beta, shift=0):
1851
+ """ Calculate momentum-integrated cross-section for EELS from X-ray photo-absorption cross-sections.
1852
+
1853
+ X-ray photo-absorption cross-sections from NIST.
1854
+ Momentum-integrated cross-section for EELS according to Egerton Ultramicroscopy 50 (1993) 13-28 equation (4)
1855
+
1856
+ Parameters
1857
+ ----------
1858
+ energy_scale: numpy array
1859
+ energy scale of spectrum to be analyzed
1860
+ e0: float
1861
+ acceleration voltage in keV
1862
+ z: int
1863
+ atomic number of element
1864
+ beta: float
1865
+ effective collection angle in mrad
1866
+ shift: float
1867
+ chemical shift of edge in eV
1868
+ """
1869
+ beta = beta * 0.001 # collection half angle theta [rad]
1870
+ # theta_max = self.parent.spec[0].convAngle * 0.001 # collection half angle theta [rad]
1871
+ dispersion = energy_scale[1] - energy_scale[0]
1872
+
1873
+ x_sections = get_x_sections(z)
1874
+ enexs = x_sections['ene']
1875
+ datxs = x_sections['dat']
1876
+
1877
+ # enexs = enexs[:len(datxs)]
1878
+
1879
+ #####
1880
+ # Cross Section according to Egerton Ultramicroscopy 50 (1993) 13-28 equation (4)
1881
+ #####
1882
+
1883
+ # Relativistic correction factors
1884
+ t = 511060.0 * (1.0 - 1.0 / (1.0 + e0 / 511.06) ** 2) / 2.0
1885
+ gamma = 1 + e0 / 511.06
1886
+ a = 6.5 # e-14 *10**14
1887
+ b = beta
1888
+
1889
+ theta_e = enexs / (2 * gamma * t)
1890
+
1891
+ g = 2 * np.log(gamma) - np.log((b ** 2 + theta_e ** 2) / (b ** 2 + theta_e ** 2 / gamma ** 2)) - (
1892
+ gamma - 1) * b ** 2 / (b ** 2 + theta_e ** 2 / gamma ** 2)
1893
+ datxs = datxs * (a / enexs / t) * (np.log(1 + b ** 2 / theta_e ** 2) + g) / 1e8
1894
+
1895
+ datxs = datxs * dispersion # from per eV to per dispersion
1896
+ coeff = splrep(enexs, datxs, s=0) # spline representation (unused; linear interpolation below avoids oscillations)
1897
+ xsec = np.zeros(len(energy_scale))
1898
+ # shift = 0# int(ek -onsetXRPS)#/dispersion
1899
+ lin = interp1d(enexs, datxs, kind='linear') # Linear instead of spline interpolation to avoid oscillations.
1900
+ if energy_scale[0] < enexs[0]:
1901
+ start = np.searchsorted(energy_scale, enexs[0])+1
1902
+ else:
1903
+ start = 0
1904
+ xsec[start:] = lin(energy_scale[start:] - shift)
1905
+
1906
+ return xsec
1907
+
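A sketch of a single cross-section calculation, here the carbon K edge (z=6) at 200 keV with a 30 mrad effective collection angle (the result is nominally in barns, per the conversion comment in `make_cross_sections`):

```python
import numpy as np
import matplotlib.pyplot as plt

energy_scale = np.arange(250., 600., 0.25)   # eV
sigma = eels.xsec_xrpa(energy_scale, e0=200., z=6, beta=30.)

plt.plot(energy_scale, sigma)
plt.xlabel('energy loss (eV)')
plt.ylabel('cross-section (barn)')
plt.show()
```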
1908
+
1909
+ ##########################
1910
+ # EELS Database
1911
+ ##########################
1912
+
1913
+
1914
+ def read_msa(msa_string):
1915
+ """read msa formated file"""
1916
+ parameters = {}
1917
+ y = []
1918
+ x = []
1919
+ # Read the keywords
1920
+ data_section = False
1921
+ msa_lines = msa_string.split('\n')
1922
+
1923
+ for line in msa_lines:
1924
+ if data_section is False:
1925
+ if len(line) > 0:
1926
+ if line[0] == "#":
1927
+ try:
1928
+ key, value = line.split(': ')
1929
+ value = value.strip()
1930
+ except ValueError:
1931
+ key = line
1932
+ value = None
1933
+ key = key.strip('#').strip()
1934
+
1935
+ if key != 'SPECTRUM':
1936
+ parameters[key] = value
1937
+ else:
1938
+ data_section = True
1939
+ else:
1940
+ # Read the data
1941
+
1942
+ if len(line) > 0 and line[0] != "#" and line.strip():
1943
+ if parameters['DATATYPE'] == 'XY':
1944
+ xy = line.replace(',', ' ').strip().split()
1945
+ y.append(float(xy[1]))
1946
+ x.append(float(xy[0]))
1947
+ elif parameters['DATATYPE'] == 'Y':
1948
+ print('y')
1949
+ data = [
1950
+ float(i) for i in line.replace(',', ' ').strip().split()]
1951
+ y.extend(data)
1952
+ parameters['data'] = np.array(y)
1953
+ if 'XPERCHAN' in parameters:
1954
+ parameters['XPERCHAN'] = str(parameters['XPERCHAN']).split(' ')[0]
1955
+ parameters['OFFSET'] = str(parameters['OFFSET']).split(' ')[0]
1956
+ parameters['energy_scale'] = np.arange(len(y)) * float(parameters['XPERCHAN']) + float(parameters['OFFSET'])
1957
+ return parameters
1958
+
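A minimal round trip through `read_msa` with a hand-written two-channel MSA fragment:

```python
msa_string = ("#FORMAT : EMSA/MAS Spectral Data File\n"
              "#DATATYPE : XY\n"
              "#XPERCHAN : 0.5\n"
              "#OFFSET : 280.0\n"
              "#SPECTRUM : \n"
              "280.0, 10.0\n"
              "280.5, 12.0\n")

parameters = eels.read_msa(msa_string)
print(parameters['energy_scale'])   # [280.  280.5]
print(parameters['data'])           # [10. 12.]
```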
1959
+
1960
+ def get_spectrum_eels_db(formula=None, edge=None, title=None, element=None):
1961
+ """
1962
+ get spectra from EELS database
1963
+ chemical formula and edge are accepted as search terms.
1964
+ more of the search parameters could be exposed.
1965
+ """
1966
+ valid_edges = ['K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5']
1967
+ if edge is not None and edge not in valid_edges:
1968
+ print('edge should be one of ', valid_edges)
1969
+
1970
+ spectrum_type = None
1971
+ title = title
1972
+ author = None
1973
+ element = element
1974
+ min_energy = None
1975
+ max_energy = None
1976
+ resolution = None
1977
+ min_energy_compare = "gt"
1978
+ max_energy_compare = "lt",
1979
+ resolution_compare = "lt"
1980
+ max_n = -1
1981
+ monochromated = None
1982
+ order = None
1983
+ order_direction = "ASC"
1984
+ verify_certificate = True
1985
+ # Verify arguments
1986
+
1987
+ if spectrum_type is not None and spectrum_type not in {'coreloss', 'lowloss', 'zeroloss', 'xrayabs'}:
1988
+ raise ValueError("spectrum_type must be one of \'coreloss\', \'lowloss\', "
1989
+ "\'zeroloss\', \'xrayabs\'.")
1990
+ # valid_edges = ['K', 'L1', 'L2,3', 'M2,3', 'M4,5', 'N2,3', 'N4,5', 'O2,3', 'O4,5']
1991
+
1992
+ params = {
1993
+ "type": spectrum_type,
1994
+ "title": title,
1995
+ "author": author,
1996
+ "edge": edge,
1997
+ "min_energy": min_energy,
1998
+ "max_energy": max_energy,
1999
+ "resolution": resolution,
2000
+ "resolution_compare": resolution_compare,
2001
+ "monochromated": monochromated,
2002
+ "formula": formula,
2003
+ 'element': element,
2004
+ "min_energy_compare": min_energy_compare,
2005
+ "max_energy_compare": max_energy_compare,
2006
+ "per_page": max_n,
2007
+ "order": order,
2008
+ "order_direction": order_direction,
2009
+ }
2010
+
2011
+ request = requests.get('http://api.eelsdb.eu/spectra', params=params, verify=verify_certificate)
2012
+ # spectra = []
2013
+ jsons = request.json()
2014
+ if "message" in jsons:
2015
+ # Invalid query, EELSdb raises error.
2016
+ raise IOError(
2017
+ "Please report the following error to the HyperSpy developers: "
2018
+ "%s" % jsons["message"])
2019
+ reference_spectra = {}
2020
+ for json_spectrum in jsons:
2021
+ download_link = json_spectrum['download_link']
2022
+ # print(download_link)
2023
+ msa_string = requests.get(download_link, verify=verify_certificate).text
2024
+ # print(msa_string[:100])
2025
+ parameters = read_msa(msa_string)
2026
+ if 'XPERCHAN' in parameters:
2027
+ reference_spectra[parameters['TITLE']] = parameters
2028
+ print(parameters['TITLE'])
2029
+ print(f'found {len(reference_spectra)} spectra in EELS database')
2030
+
2031
+ return reference_spectra
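A sketch of querying the database (requires network access to the EELS DB); the returned dictionary maps spectrum titles to the parsed MSA parameters:

```python
import matplotlib.pyplot as plt

reference_spectra = eels.get_spectrum_eels_db(formula='BN', edge='K')

for title, parameters in reference_spectra.items():
    plt.plot(parameters['energy_scale'], parameters['data'], label=title)
plt.legend()
plt.show()
```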