pyTEMlib 0.2024.8.4__py3-none-any.whl → 0.2025.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyTEMlib/animation.py +1 -1
- pyTEMlib/atom_tools.py +2 -1
- pyTEMlib/core_loss_widget.py +337 -272
- pyTEMlib/eels_dialog.py +15 -10
- pyTEMlib/eels_tools.py +452 -125
- pyTEMlib/file_tools.py +314 -30
- pyTEMlib/image_tools.py +91 -15
- pyTEMlib/info_widget.py +211 -58
- pyTEMlib/info_widget3.py +1120 -0
- pyTEMlib/low_loss_widget.py +344 -41
- pyTEMlib/peak_dialog.py +141 -59
- pyTEMlib/probe_tools.py +65 -8
- pyTEMlib/version.py +2 -2
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/METADATA +16 -7
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/RECORD +19 -18
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/WHEEL +1 -2
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/LICENSE +0 -0
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/entry_points.txt +0 -0
- {pyTEMlib-0.2024.8.4.dist-info → pyTEMlib-0.2025.2.1.dist-info}/top_level.txt +0 -0
pyTEMlib/eels_tools.py
CHANGED
@@ -33,6 +33,8 @@ from scipy.signal import peak_prominences
 from scipy.ndimage import gaussian_filter
 from scipy.optimize import curve_fit, leastsq
 
+from numba import jit, float64
+
 import requests
 
 # ## And we use the image tool library of pyTEMlib
@@ -247,7 +249,7 @@ def model_smooth(x, p, only_positive_intensity=False):
 
     return y
 
-
+@jit
 def gauss(x, p):  # p[0]==mean, p[1]= amplitude p[2]==fwhm,
     """Gaussian Function
 
@@ -267,6 +269,8 @@ def lorentz(x, center, amplitude, width):
     lorentz_peak = 0.5 * width / np.pi / ((x - center) ** 2 + (width / 2) ** 2)
     return amplitude * lorentz_peak / lorentz_peak.max()
 
+def zero_loss_function(x, p):
+    return zl_func(x, *p)
 
 def zl_func(x, center1, amplitude1, width1, center2, amplitude2, width2):
     """ zero loss function as product of two lorentzians """
@@ -319,9 +323,9 @@ def get_zero_loss_energy(dataset):
             start = startx - i
         if spectrum[startx + i] < 0.3 * spectrum[startx]:
             end = startx + i
-    if end - start <
-        end = startx +
-        start = startx -
+    if end - start < 7:
+        end = startx + 4
+        start = startx - 4
     width = int((end-start)/2+0.5)
 
     energy = dataset.get_spectral_dims(return_axis=True)[0].values
@@ -393,6 +397,15 @@ def align_zero_loss(dataset: sidpy.Dataset) -> sidpy.Dataset:
     new_si.metadata.update({'zero_loss': {'shifted': shifts}})
     return new_si
 
+from numba import jit
+
+def get_zero_losses(energy, z_loss_params):
+    z_loss_dset = np.zeros((z_loss_params.shape[0], z_loss_params.shape[1], energy.shape[0]))
+    for x in range(z_loss_params.shape[0]):
+        for y in range(z_loss_params.shape[1]):
+            z_loss_dset[x, y] += zl_func(energy, *z_loss_params[x, y])
+    return z_loss_dset
+
 
 
 
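The new get_zero_losses helper builds the per-pixel zero-loss models with an explicit double loop over the scan; compared with the broadcast version it replaces in get_resolution_functions (next hunk), the loop avoids materializing a full (nx, ny, n_energy) energy grid, and the numba import added just above it suggests the loop form is meant for jit compilation. A minimal sketch of how the helper is driven; the shapes and parameter values below are illustrative assumptions, not part of the diff:

    import numpy as np
    from pyTEMlib.eels_tools import get_zero_losses

    energy = np.linspace(-5., 45., 1024)             # energy-loss axis in eV
    # one (center1, amplitude1, width1, center2, amplitude2, width2) set per pixel
    z_loss_params = np.zeros((2, 2, 6))
    z_loss_params[..., :3] = [0.0, 1.0, 0.6]         # first Lorentzian of zl_func
    z_loss_params[..., 3:] = [0.1, 0.5, 1.2]         # second Lorentzian of zl_func
    models = get_zero_losses(energy, z_loss_params)  # -> shape (2, 2, 1024)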
@@ -486,11 +499,12 @@ def get_resolution_functions(dataset: sidpy.Dataset, startFitEnergy: float=-1, e
     z_loss_dset = dataset.copy()
     z_loss_dset *= 0.0
 
-    energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
-
-    z_loss_peaks = zl_func(energy_grid, *z_loss_params)
-
-
+    # energy_grid = np.broadcast_to(energy.reshape((1, 1, -1)), (z_loss_dset.shape[0],
+    #                               z_loss_dset.shape[1], energy.shape[0]))
+    # z_loss_peaks = zl_func(energy_grid, *z_loss_params)
+    z_loss_params = np.array(z_loss_params)
+    z_loss_dset += get_zero_losses(np.array(energy), np.array(z_loss_params))
+
     shifts = z_loss_params[:, :, 0] * z_loss_params[:, :, 3]
     widths = z_loss_params[:, :, 2] * z_loss_params[:, :, 5]
 
@@ -520,7 +534,15 @@ def drude_lorentz(eps_inf, leng, ep, eb, gamma, e, amplitude):
     return eps
 
 
-def
+def get_plasmon_losses(energy, params):
+    dset = np.zeros((params.shape[0], params.shape[1], energy.shape[0]))
+    for x in range(params.shape[0]):
+        for y in range(params.shape[1]):
+            dset[x, y] += energy_loss_function(energy, params[x, y])
+    return dset
+
+
+def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
     """
     Fit plasmon peak positions and widths in a TEM dataset using a Drude model.
 
@@ -557,12 +579,14 @@ def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float
     - If `plot_result` is True, the function plots Ep, Ew, and A as separate subplots.
     """
     # define Drude function for plasmon fitting
+
+    anglog, T, _ = angle_correction(dataset)
     def energy_loss_function(E: np.ndarray, Ep: float, Ew: float, A: float) -> np.ndarray:
-
+
         eps = 1 - Ep**2/(E**2+Ew**2) + 1j * Ew * Ep**2/E/(E**2+Ew**2)
-        elf = (-1/eps).imag
+        elf = (-1/eps).imag
         return A*elf
-
+
     # define window for fitting
     energy = dataset.get_spectral_dims(return_axis=True)[0].values
     start_fit_pixel = np.searchsorted(energy, startFitEnergy)
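As a check on the inline Drude expression above: the code writes the dielectric function directly in real and imaginary parts. Starting from the single-pole Drude form and multiplying through by the conjugate of the denominator,

    eps(E) = 1 - Ep**2 / (E**2 + 1j*Ew*E)
           = 1 - Ep**2/(E**2 + Ew**2) + 1j * Ew * Ep**2 / (E*(E**2 + Ew**2))

which is exactly the eps line in this hunk. The fitted model is the loss function Im(-1/eps) scaled by A; new in this release, the data are divided by the angular correction anglog before fitting (see the next hunk).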
@@ -576,32 +600,218 @@ def fit_plasmon(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float
         dataset = dataset.rechunk(chunks=(1, -1))
         fit_dset = dataset[:, start_fit_pixel:end_fit_pixel]
     else:
-        fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
+        fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel] / anglog[start_fit_pixel:end_fit_pixel])
         guess_pos = np.argmax(fit_dset)
         guess_amplitude = fit_dset[guess_pos]
-        guess_width =
-
-
+        guess_width = (endFitEnergy-startFitEnergy)/4
+        guess_pos = energy[guess_pos]
+        if guess_width > 8:
+            guess_width = 8
+        try:
+            popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
+                                   p0=[guess_pos, guess_width, guess_amplitude])
+        except:
+            end_fit_pixel = np.searchsorted(energy, 30)
+            fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel] / anglog[start_fit_pixel:end_fit_pixel])
+            try:
+                popt, pcov = curve_fit(energy_loss_function, energy[start_fit_pixel:end_fit_pixel], fit_dset,
+                                       p0=[guess_pos, guess_width, guess_amplitude])
+            except:
+                popt = [0, 0, 0]
+
+        plasmon = dataset.like_data(energy_loss_function(energy, popt[0], popt[1], popt[2]))
+        plasmon *= anglog
+        start_plasmon = np.searchsorted(energy, 0)+1
+        plasmon[:start_plasmon] = 0.
+
+        epsilon = drude(energy, popt[0], popt[1], 1) * popt[2]
+        epsilon[:start_plasmon] = 0.
+
+        plasmon.metadata['plasmon'] = {'parameter': popt, 'epsilon': epsilon}
+        return plasmon
 
     # if it can be parallelized:
     fitter = SidFitter(fit_dset, energy_loss_function, num_workers=number_workers,
                        threads=number_threads, return_cov=False, return_fit=False, return_std=False,
                        km_guess=False, num_fit_parms=3)
-    [
+    [fit_parameter] = fitter.do_fit()
+
+    plasmon_dset = dataset * 0.0
+    fit_parameter = np.array(fit_parameter)
+    plasmon_dset += get_plasmon_losses(np.array(energy), fit_parameter)
+    if 'plasmon' not in plasmon_dset.metadata:
+        plasmon_dset.metadata['plasmon'] = {}
+    plasmon_dset.metadata['plasmon'].update({'startFitEnergy': startFitEnergy,
+                                             'endFitEnergy': endFitEnergy,
+                                             'fit_parameter': fit_parameter,
+                                             'original_low_loss': dataset.title})
+
+    return plasmon_dset
+
+
+def angle_correction(spectrum):
+
+    acceleration_voltage = spectrum.metadata['experiment']['acceleration_voltage']
+    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
+    # eff_beta = effective_collection_angle(energy_scale, spectrum.metadata['experiment']['convergence_angle'],
+    #                                       spectrum.metadata['experiment']['collection_angle'], acceleration_voltage)
+
+    epc = energy_scale[1] - energy_scale[0]  # input('ev per channel : ');
+
+    alpha = spectrum.metadata['experiment']['convergence_angle']  # input('Alpha (mrad) : ');
+    beta = spectrum.metadata['experiment']['collection_angle']  # input('Beta (mrad) : ');
+    e = energy_scale
+    e0 = acceleration_voltage/1000  # input('E0 (keV) : ');
+
+    T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
+
+    tgt = e0*(1.+e0/1022.)/(1+e0/511.);
+    thetae = (e+1e-6)/tgt;  # % avoid NaN for e=0
+    # % A2,B2,T2 ARE SQUARES OF ANGLES IN RADIANS**2
+    a2 = alpha*alpha*1e-6 + 1e-7;  # % avoid inf for alpha=0
+    b2 = beta*beta*1e-6;
+    t2 = thetae*thetae*1e-6;
+    eta1 = np.sqrt((a2+b2+t2)**2-4*a2*b2)-a2-b2-t2;
+    eta2 = 2.*b2*np.log(0.5/t2*(np.sqrt((a2+t2-b2)**2+4.*b2*t2)+a2+t2-b2));
+    eta3 = 2.*a2*np.log(0.5/t2*(np.sqrt((b2+t2-a2)**2+4.*a2*t2)+b2+t2-a2));
+    eta = (eta1+eta2+eta3)/a2/np.log(4./t2);
+    f1 = (eta1+eta2+eta3)/2./a2/np.log(1.+b2/t2);
+    f2 = f1;
+    if(alpha/beta > 1):
+        f2 = f1*a2/b2;
+
+    bstar = thetae*np.sqrt(np.exp(f2*np.log(1.+b2/t2))-1.);
+    anglog = f2
+    """
+    b = eff_beta/1000.0  # %rad
+    e0 = acceleration_voltage/1000.0  # %keV
+    T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
+    tgt = 1000*e0*(1022.12 + e0)/(511.06 + e0)  # %eV Appendix E p 427
+
+    the = energy_scale/tgt  # varies with energy loss! # Appendix E p 427
+    anglog = np.log(1.0 + b*b/the/the)
+    # 2 * T = m_0 v**2 !!!  a_0 = 0.05292 nm  epc is for sum over I0
+    """
+    return anglog, (np.pi*0.05292*T/2.0)/epc, bstar[0],
+
+def energy_loss_function(energy_scale: np.ndarray, p: np.ndarray, anglog=1) -> np.ndarray:
+    eps = 1 - p[0]**2/(energy_scale**2+p[1]**2) + 1j * p[1] * p[0]**2/energy_scale/(energy_scale**2+p[1]**2)
+    elf = (-1/eps).imag
+    return elf*p[2]*anglog
+
+def inelatic_mean_free_path(E_p, spectrum):
+    acceleration_voltage = spectrum.metadata['experiment']['acceleration_voltage']
+    energy_scale = spectrum.get_spectral_dims(return_axis=True)[0].values
+
+    e0 = acceleration_voltage/1000.0  # %keV
+
+    eff_beta = effective_collection_angle(energy_scale, spectrum.metadata['experiment']['convergence_angle'],
+                                          spectrum.metadata['experiment']['collection_angle'], acceleration_voltage)
+    beta = eff_beta/1000.0  # %rad
+
+    T = 1000.0*e0*(1.+e0/1022.12)/(1.0+e0/511.06)**2  # %eV # equ.5.2a or Appendix E p 427
+    tgt = 1000*e0*(1022.12 + e0)/(511.06 + e0)  # %eV Appendix E p 427
+    theta_e = E_p/tgt  # varies with energy loss! # Appendix E p 427
+
+    # 2 * T = m_0 v**2 !!!
+    a_0 = 0.05292  # nm
+    imfp = 4*T*a_0/E_p/np.log(1+beta**2/theta_e**2)
+
+    return imfp, theta_e
+
+
+def multiple_scattering(energy_scale: np.ndarray, p: list, core_loss=False) -> np.ndarray:
+    p = np.abs(p)
+    tmfp = p[3]
+    if core_loss:
+        dif = 1
+    else:
+        dif = 16
+    LLene = np.linspace(1, 2048-1, 2048)/dif
+
+    SSD = energy_loss_function(LLene, p)
+    ssd = np.fft.fft(SSD)
+    ssd2 = ssd.copy()
+
+    # sum contribution from each order of scattering:
+    PSD = np.zeros(len(LLene))
+    for order in range(15):
+        # this order's convolved spectrum is SSD2
+        SSD2 = np.fft.ifft(ssd).real
+
+        # scale right (could be done better? GERD)
+        # and add this order to the final spectrum
+        PSD += SSD2*abs(sum(SSD)/sum(SSD2)) / scipy.special.factorial(order+1)*np.power(tmfp, (order+1))*np.exp(-tmfp)  # using equation 4.1 of Egerton ed2
+
+        # next order convolution
+        ssd = ssd * ssd2
+
+    PSD /= tmfp*np.exp(-tmfp)
+    BGDcoef = scipy.interpolate.splrep(LLene, PSD, s=0)
+    msd = scipy.interpolate.splev(energy_scale, BGDcoef)
+    start_plasmon = np.searchsorted(energy_scale, 0)+1
+    msd[:start_plasmon] = 0.
+    return msd
+
+def fit_multiple_scattering(dataset: Union[sidpy.Dataset, np.ndarray], startFitEnergy: float, endFitEnergy: float, pin=None, number_workers: int = 4, number_threads: int = 8) -> Union[sidpy.Dataset, np.ndarray]:
+    """
+    Fit multiple scattering of plasmon peak in a TEM dataset.
+
+    Parameters:
+        dataset: sidpy.Dataset or numpy.ndarray
+            The dataset containing TEM spectral data.
+        startFitEnergy: float
+            The start energy of the fitting window.
+        endFitEnergy: float
+            The end energy of the fitting window.
+        number_workers: int, optional
+            The number of workers for parallel processing (default is 4).
+        number_threads: int, optional
+            The number of threads for parallel processing (default is 8).
+
+    Returns:
+        fitted_dataset: sidpy.Dataset or numpy.ndarray
+            The dataset with fitted plasmon peak parameters. The dimensions and
+            format depend on the input dataset.
+
+    Raises:
+        ValueError: If the input dataset does not have the expected dimensions or format.
+
+    Notes:
+        - The function uses the Drude model to fit plasmon peaks.
+        - The fitting parameters are peak position (Ep), peak width (Ew), and amplitude (A).
+        - If `plot_result` is True, the function plots Ep, Ew, and A as separate subplots.
+    """
+
+    # define window for fitting
+    energy = dataset.get_spectral_dims(return_axis=True)[0].values
+    start_fit_pixel = np.searchsorted(energy, startFitEnergy)
+    end_fit_pixel = np.searchsorted(energy, endFitEnergy)
+
+    def errf_multi(p, y, x):
+        elf = multiple_scattering(x, p)
+        err = y - elf
+        # print(p, sum(np.abs(err)))
+        return np.abs(err)  # /np.sqrt(y)
+
+    if pin is None:
+        pin = np.array([9, 1, .7, 0.3])
 
-        ax3.imshow(fitted_dataset[:, :, 2], cmap='jet')
-        ax3.set_title('A - Amplitude')
-        plt.show()
-    return fitted_dataset
+
+    fit_dset = np.array(dataset[start_fit_pixel:end_fit_pixel])
+    popt, lsq = leastsq(errf_multi, pin, args=(fit_dset, energy[start_fit_pixel:end_fit_pixel]), maxfev=2000)
+
+    multi = dataset.like_data(multiple_scattering(energy, popt))
 
+    multi.metadata['multiple_scattering'] = {'parameter': popt}
+    return multi
 
+
 
 def drude_simulation(dset, e, ep, ew, tnm, eb):
     """probabilities of dielectric function eps relative to zero-loss integral (i0 = 1)
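Putting the new low-loss pieces together: angle_correction supplies the angular weighting anglog, fit_plasmon fits the Drude parameters (with a fallback refit up to 30 eV when the first fit fails), and fit_multiple_scattering adds plural scattering following Eq. 4.1 of Egerton. A hedged single-spectrum sketch; the synthetic data and metadata values are assumptions for illustration only, not from the package docs:

    import numpy as np
    import sidpy
    import pyTEMlib.eels_tools as eels

    # synthetic Drude-like low-loss spectrum, only to exercise the new API
    energy = np.linspace(-4.97, 59.97, 1024)
    counts = np.abs(eels.energy_loss_function(energy, np.array([16., 4., 100.])))
    spectrum = sidpy.Dataset.from_array(counts, title='low_loss')
    spectrum.data_type = 'SPECTRUM'
    spectrum.set_dimension(0, sidpy.Dimension(energy, name='energy_loss', units='eV',
                                              quantity='energy loss', dimension_type='spectral'))
    spectrum.metadata['experiment'] = {'acceleration_voltage': 200000.,
                                       'convergence_angle': 10.,   # mrad
                                       'collection_angle': 30.}    # mrad

    plasmon = eels.fit_plasmon(spectrum, startFitEnergy=10., endFitEnergy=25.)
    print(plasmon.metadata['plasmon']['parameter'])   # fitted [Ep, Ew, A]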
@@ -875,7 +1085,7 @@ def get_x_sections(z: int=0) -> dict:
         return 0
 
 
-def list_all_edges(z: Union[str, int]=0, verbose=False)->[str, dict]:
+def list_all_edges(z: Union[str, int]=0, verbose=False)->list[str, dict]:
     """List all ionization edges of an element with atomic number z
 
     Parameters
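A note on the annotation change above: subscripted built-ins such as list[str, dict] (PEP 585 generics) are evaluated when the def statement runs, so this version of the module needs Python 3.9 or newer unless postponed evaluation of annotations is enabled.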
@@ -943,74 +1153,109 @@ def find_all_edges(edge_onset: float, maximal_chemical_shift: float=5.0, major_e
     else:
         text += new_text
 
-    return text
-
+    return text
 
 def find_associated_edges(dataset: sidpy.Dataset) -> None:
     onsets = []
     edges = []
-    if '
-    onsets.append(edge['
-    if
+    if 'core_loss' in dataset.metadata:
+        if 'edges' in dataset.metadata['core_loss']:
+            for key, edge in dataset.metadata['core_loss']['edges'].items():
+                if key.isdigit():
+                    """for sym in edge['all_edges']:  # TODO: Could be replaced with exclude
+                        onsets.append(edge['all_edges'][sym]['onset'] + edge['chemical_shift'])
+                        edges.append([key, f"{edge['element']}-{sym}", onsets[-1]])
+                    """
+                    onsets.append(edge['onset'])
+                    dataset.metadata['core_loss']['edges'][key]['associated_peaks'] = {}
+    if 'peak_fit' in dataset.metadata:
+        p = dataset.metadata['peak_fit']['peak_out_list']
+        for key, peak in enumerate(p):
+            distances = (onsets-peak[0])*-1
+            distances[distances < -0.3] = 1e6
+            if np.min(distances) < 50:
+                index = np.argmin(distances)
+                dataset.metadata['core_loss']['edges'][str(index)]['associated_peaks'][key] = peak
+
+
+    """for key, peak in dataset.metadata['peak_fit']['peaks'].items():
+        if key.isdigit():
+            distance = dataset.get_spectral_dims(return_axis=True)[0].values[-1]
+            index = -1
+            for ii, onset in enumerate(onsets):
+                if onset < peak['position'] < onset+post_edge:
+                    if distance > np.abs(peak['position'] - onset):
+                        distance = np.abs(peak['position'] - onset)  # TODO: check whether absolute is good
+                        distance_onset = peak['position'] - onset
+                        index = ii
+            if index >= 0:
+                peak['associated_edge'] = edges[index][1]  # check if more info is necessary
+                peak['distance_to_onset'] = distance_onset
+    """
+
+def find_white_lines(dataset: sidpy.Dataset) -> dict:
+    white_lines_out = {'sum': {}, 'ratio': {}}
+    white_lines = []
+    if 'peak_fit' in dataset.metadata:
+        peaks = dataset.metadata['peak_fit']['peaks']
+    else:
+        return
+    for index, edge in dataset.metadata['core_loss']['edges'].items():
+        if index.isdigit():
+            if 'associated_peaks' in edge:
+                peaks = edge['associated_peaks']
+
+                if edge['symmetry'][-2:] in ['L3', 'M5']:
+                    if 'L3' in edge['all_edges']:
+                        end_range1 = edge['all_edges']['L2']['onset'] + edge['chemical_shift']
+                        end_range2 = edge['all_edges']['L2']['onset']*2 - edge['all_edges']['L3']['onset'] + edge['chemical_shift']
+                        white_lines = ['L3', 'L2']
+                    elif 'M5' in edge['all_edges']:
+                        end_range1 = edge['all_edges']['M4']['onset'] + edge['chemical_shift']
+                        end_range2 = edge['all_edges']['M4']['onset']*2 - edge['all_edges']['M5']['onset'] + edge['chemical_shift']
+                        white_lines = ['M5', 'M4']
+                    else:
+                        return
+                    white_line_areas = [0., 0.]
+                    for key, peak in peaks.items():
+                        if str(key).isdigit():
+                            if peak[0] < end_range1:
+                                white_line_areas[0] += np.sqrt(2 * np.pi) * peak[1] * np.abs(peak[2]/np.sqrt(2 * np.log(2)))
+                            elif peak[0] < end_range2:
+                                white_line_areas[1] += np.sqrt(2 * np.pi) * peak[1] * np.abs(peak[2]/np.sqrt(2 * np.log(2)))
+
+                    edge['white_lines'] = {white_lines[0]: white_line_areas[0], white_lines[1]: white_line_areas[1]}
+
+                    reference_counts = edge['areal_density']*dataset.metadata['core_loss']['xsections'][int(index)].sum()
+                    white_lines_out['sum'][f"{edge['element']}-{white_lines[0]}+{white_lines[1]}"] = (white_line_areas[0] + white_line_areas[1])/reference_counts
+                    white_lines_out['ratio'][f"{edge['element']}-{white_lines[0]}/{white_lines[1]}"] = white_line_areas[0] / white_line_areas[1]
+    return white_lines_out
+
+
+    """white_line_ratios = {}
+    white_line_sum = {}
+    for sym, area in white_lines.items():
+        if sym[-2:] in ['L2', 'M4', 'M2']:
+            if area > 0 and f"{sym[:-1]}{int(sym[-1]) + 1}" in white_lines:
+                if white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"] > 0:
+                    white_line_ratios[f"{sym}/{sym[-2]}{int(sym[-1]) + 1}"] = area / white_lines[
+                        f"{sym[:-1]}{int(sym[-1]) + 1}"]
+                    white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] = (
+                        area + white_lines[f"{sym[:-1]}{int(sym[-1]) + 1}"])
+
+    areal_density = 1.
+    if 'edges' in dataset.metadata:
+        for key, edge in dataset.metadata['edges'].items():
+            if key.isdigit():
+                if edge['element'] == sym.split('-')[0]:
+                    areal_density = edge['areal_density']
+                    break
+    white_line_sum[f"{sym}+{sym[-2]}{int(sym[-1]) + 1}"] /= areal_density
 
     dataset.metadata['peak_fit']['white_lines'] = white_lines
     dataset.metadata['peak_fit']['white_line_ratios'] = white_line_ratios
     dataset.metadata['peak_fit']['white_line_sums'] = white_line_sum
-
+    """
 
 def second_derivative(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:
     """Calculates second derivative of a sidpy.dataset"""
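On the bookkeeping in the new find_white_lines: each associated peak is a [position, amplitude, fwhm] triple from the Gaussian peak fit, and the loop accumulates sqrt(2*pi) * amplitude * |fwhm| / sqrt(2*ln 2) per peak, assigning peaks below the L2 (or M4) onset to L3 (M5) and the next window to L2 (M4). The same constant multiplies both lines, so the reported L3/L2 (M5/M4) ratio is independent of it; only the 'sum' output, normalized by areal_density times the integrated cross section, carries an absolute scale.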
@@ -1042,9 +1287,10 @@ def second_derivative(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:
 
     noise_level_start = sensitivity * np.std(second_dif[3:50])
     noise_level_end = sensitivity * np.std(second_dif[start_end_noise: start_end_noise + 50])
-    slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
-    noise_level = noise_level_start
-    return second_dif, noise_level
+    # slope = (noise_level_end - noise_level_start) / (len(energy_scale) - 400)
+    # noise_level = noise_level_start  # + np.arange(len(energy_scale)) * slope
+    return second_dif, noise_level
+
 
 
 def find_edges(dataset: sidpy.Dataset, sensitivity: float=2.5) -> None:
@@ -1337,6 +1583,8 @@ def make_cross_sections(edges:dict, energy_scale:np.ndarray, e_0:float, coll_ang
     """
    for key in edges:
        if str(key).isdigit():
+           if edges[key]['z'] < 1:
+               break
            edges[key]['data'] = xsec_xrpa(energy_scale, e_0 / 1000., edges[key]['z'], coll_angle,
                                           edges[key]['chemical_shift']) / 1e10  # from barnes to 1/nm^2
    if low_loss is not None:
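The new guard in make_cross_sections skips edge entries whose atomic number z is below 1 (presumably placeholder entries with no element assigned yet), so xsec_xrpa is never called with an invalid z; note that it uses break rather than continue, so iteration over the numbered edges stops at the first such entry.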
@@ -1388,12 +1636,13 @@ def power_law_background(spectrum:np.ndarray, energy_scale:np.ndarray, fit_area:
     return background, p
 
 
-def cl_model(
+def cl_model(xx, pp, number_of_edges, xsec):
     """ core loss model for fitting"""
-
+    yy = pp[0] * xx**pp[1] + pp[2] + pp[3]*xx + pp[4] * xx * xx
     for i in range(number_of_edges):
-
-
+        pp[i+5] = np.abs(pp[i+5])
+        yy = yy + pp[i+5] * xsec[i, :]
+    return yy
 
 
 def fit_edges2(spectrum, energy_scale, edges):
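The restored cl_model combines a power-law-plus-polynomial background with one |amplitude|-scaled cross section per edge. A minimal sketch of fitting that model with scipy; the cross-section shape here is a placeholder array, not a real xsec_xrpa output:

    import numpy as np
    from scipy.optimize import leastsq
    from pyTEMlib.eels_tools import cl_model

    energy = np.linspace(400., 800., 1024)
    xsec = (np.exp(-(energy - 530.)/200.) * (energy > 530.)).reshape(1, -1)  # placeholder edge
    y = 1e5 * energy**-2.5 + 3. * xsec[0]                                    # synthetic spectrum

    def residuals(pp, xx, yy):
        return yy - cl_model(xx, pp, 1, xsec)

    p0 = np.array([1e5, -2.5, 0., 0., 0., 1.])
    p, _ = leastsq(residuals, p0, args=(energy, y))
    areal_density = abs(p[5])   # edge amplitude, cf. fit_edges2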
@@ -1467,7 +1716,7 @@ def fit_edges2(spectrum, energy_scale, edges):
     for key in edges:
         if key.isdigit():
             edges[key]['areal_density'] = p[int(key)+5]
-    print(p)
+    # print(p)
     edges['model'] = {}
     edges['model']['background'] = (p[0] * np.power(x, -p[1]) + p[2] + x**p[3] + p[4] * x * x)
     edges['model']['background-poly_0'] = p[2]
@@ -1481,9 +1730,20 @@ def fit_edges2(spectrum, energy_scale, edges):
     edges['model']['fit_parameter'] = p
     edges['model']['fit_area_start'] = edges['fit_area']['fit_start']
     edges['model']['fit_area_end'] = edges['fit_area']['fit_end']
-
+    edges['model']['xsec'] = xsec
     return edges
 
+
+def core_loss_model(energy_scale, pp, number_of_edges, xsec):
+    """ core loss model for fitting"""
+    xx = np.array(energy_scale)
+    yy = pp[0] * xx**pp[1] + pp[2] + pp[3]*xx + pp[4] * xx * xx
+    for i in range(number_of_edges):
+        pp[i+5] = np.abs(pp[i+5])
+        yy = yy + pp[i+5] * xsec[i, :]
+    return yy
+
+
 
 def fit_edges(spectrum, energy_scale, region_tags, edges):
     """fit edges for quantification"""
@@ -1615,19 +1875,35 @@ def get_spectrum(dataset, x=0, y=0, bin_x=1, bin_y=1):
     spectrum.data_type = 'Spectrum'
     return spectrum
 
-def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
+def find_peaks(dataset, energy_scale):  # , fit_start, fit_end, sensitivity=2):
     """find peaks in spectrum"""
+    peaks, prop = scipy.signal.find_peaks(np.abs(dataset)+1, width=5)
+    results_half = scipy.signal.peak_widths(np.abs(dataset)+1, peaks, rel_height=0.5)[0]
+
+    disp = energy_scale[1] - energy_scale[0]
+    if len(peaks) > 0:
+        p_in = np.ravel([[energy_scale[peaks[i]], dataset[peaks[i]], results_half[i]*disp] for i in range(len(peaks))])
+    return p_in  # model, p_in
+
+def nothing():
+    pass
+    """
     if dataset.data_type.name == 'SPECTRAL_IMAGE':
-
+        if hasattr(dataset.view, 'get_spectrum'):
+            spectrum = dataset.view.get_spectrum()
+        else:
+            spectrum = np.array(dataset[0,0])
+
     else:
         spectrum = np.array(dataset)
 
     energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
 
-
-
-
+    """
+
+
+    """
     start_channel = np.searchsorted(energy_scale, fit_start)
     end_channel = np.searchsorted(energy_scale, fit_end)
     peaks = []
@@ -1636,34 +1912,26 @@ def find_peaks(dataset, fit_start, fit_end, sensitivity=2):
         peaks.append(index - start_channel)
 
     if 'model' in dataset.metadata:
-        model = dataset.metadata['model']
+        model = dataset.metadata['model']
 
     elif energy_scale[0] > 0:
         if 'edges' not in dataset.metadata:
             return
         if 'model' not in dataset.metadata['edges']:
             return
-        model = dataset.metadata['edges']['model']['spectrum']
+        model = dataset.metadata['edges']['model']['spectrum']
 
     else:
-        model = np.zeros(
+        model = np.zeros(len(energy_scale))
 
     energy_scale = energy_scale[start_channel:end_channel]
 
-    difference = np.array(spectrum)[start_channel:end_channel]
+    difference = np.array(spectrum - model)[start_channel:end_channel]
     fit = np.zeros(len(energy_scale))
-    p_out = []
     if len(peaks) > 0:
         p_in = np.ravel([[energy_scale[i], difference[i], .7] for i in peaks])
-
-
-        False))
-        fit = fit + model_smooth(energy_scale, p_out, False)
-
-        peak_model = np.zeros(len(spectrum))
-        peak_model[start_channel:end_channel] = fit
-
-        return peak_model, p_out
+    """
+
 
 
 def find_maxima(y, number_of_peaks):
@@ -1691,7 +1959,6 @@ def find_maxima(y, number_of_peaks):
     peak_indices = np.argsort(peaks)
     return peaks[peak_indices]
 
-
 #
 def model3(x, p, number_of_peaks, peak_shape, p_zl, pin=None, restrict_pos=0, restrict_width=0):
     """ model for fitting low-loss spectrum"""
@@ -1764,18 +2031,78 @@ def add_peaks(x, y, peaks, pin_in=None, peak_shape_in=None, shape='Gaussian'):
 
     return pin, peak_shape
 
+@jit
+def gauss(x, p):  # p[0]==mean, p[1]= amplitude p[2]==fwhm,
+    """Gaussian Function
+
+    p[0]==mean, p[1]= amplitude p[2]==fwhm
+    area = np.sqrt(2* np.pi)* p[1] * np.abs(p[2] / 2.3548)
+    FWHM = 2 * np.sqrt(2 * np.log(2)) * sigma = 2.3548 * sigma
+    sigma = FWHM/2.3548
+    """
+    if p[2] == 0:
+        return x * 0.
+    else:
+        return p[1] * np.exp(-(x - p[0]) ** 2 / (2.0 * (p[2] / 2.3548) ** 2))
+
+
+@jit
+def gmm(x, p):
+    y = np.zeros(len(x))
+    number_of_peaks = int(len(p)/3)
+    for i in range(number_of_peaks):
+        index = i*3
+        p[index + 1] = p[index + 1]
+        # print(p[index + 1])
+        p[index + 2] = abs(p[index + 2])
+        y = y + gauss(x, p[index:index+3])
+    return y
+
+@jit
+def residuals3(pp, xx, yy):
+    err = (yy - gmm(xx, pp))
+    return err
 
+def gaussian_mixture_model(dataset, p_in=None):
+    peak_model = None
+    if isinstance(dataset, sidpy.Dataset):
+        if dataset.data_type.name == 'SPECTRAL_IMAGE':
+            if hasattr(dataset.view, 'get_spectrum'):
+                spectrum = dataset.view.get_spectrum()
+            else:
+                spectrum = dataset[0,0]
+            spectrum.data_type == 'SPECTRUM'
+        else:
+            spectrum = dataset
+            spectrum.data_type = 'SPECTRUM'
+        energy_scale = dataset.get_spectral_dims(return_axis=True)[0].values
+    else:
+        spectrum = np.array(dataset)
+        energy_scale = np.arange(len(spectrum))
+    spectrum = np.array(spectrum)
+    # spectrum -= np.min(spectrum)-1
+    if p_in is None:
+        p_in = find_peaks(spectrum, energy_scale)
+
+    p = fit_gmm(energy_scale, np.array(spectrum), list(p_in))
+
+    peak_model = gmm(energy_scale, p)
+    return peak_model, p
+
+def fit_gmm(x, y, pin):
+    """fit a Gaussian mixture model to a spectrum"""
+    [p, _] = leastsq(residuals3, pin, args=(x, y), maxfev=10000)
+    return p
+
+
 def fit_model(x, y, pin, number_of_peaks, peak_shape, p_zl, restrict_pos=0, restrict_width=0):
     """model for fitting low-loss spectrum"""
 
     pin_original = pin.copy()
 
-
-    err = (yy - model3(xx, pp, number_of_peaks, peak_shape, p_zl, pin_original, restrict_pos,
-                       restrict_width)) / np.sqrt(np.abs(yy))
-    return err
+
 
-    [p, _] =
+    [p, _] = scipy.optimize.leastsq(residuals3, pin, args=(x, y), maxfev=19400)
     # p2 = p.tolist()
     # p3 = np.reshape(p2, (number_of_peaks, 3))
     # sort_pin = np.argsort(p3[:, 0])
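The new Gaussian-mixture path chains find_peaks (scipy peak detection seeding [position, amplitude, fwhm] triples) into fit_gmm / residuals3 / gmm. A small self-contained sketch with synthetic peaks; note that for a bare ndarray the positions come back in channel units, because the energy scale then defaults to np.arange:

    import numpy as np
    import pyTEMlib.eels_tools as eels

    x = np.arange(2048, dtype=float)
    y = (eels.gauss(x, np.array([400., 5., 60.]))
         + eels.gauss(x, np.array([900., 3., 90.]))
         + eels.gauss(x, np.array([1400., 1., 120.])))

    peak_model, p = eels.gaussian_mixture_model(y)   # p_in seeded by find_peaks
    print(np.reshape(p, (-1, 3)))                    # rows of [position, amplitude, fwhm]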
@@ -2028,4 +2355,4 @@ def get_spectrum_eels_db(formula=None, edge=None, title=None, element=None):
         print(parameters['TITLE'])
     print(f'found {len(reference_spectra.keys())} spectra in EELS database)')
 
-    return reference_spectra
+    return reference_spectra