pyTEMlib 0.2023.8.0__py2.py3-none-any.whl → 0.2024.2.0__py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pyTEMlib might be problematic.
- pyTEMlib/config_dir.py +0 -1
- pyTEMlib/crystal_tools.py +22 -26
- pyTEMlib/eds_tools.py +499 -46
- pyTEMlib/eels_dialog.py +284 -899
- pyTEMlib/eels_dialog_utilities.py +218 -341
- pyTEMlib/eels_tools.py +1526 -1583
- pyTEMlib/file_tools.py +52 -48
- pyTEMlib/graph_tools.py +3 -4
- pyTEMlib/image_tools.py +171 -41
- pyTEMlib/info_widget.py +618 -276
- pyTEMlib/kinematic_scattering.py +77 -512
- pyTEMlib/peak_dialog.py +162 -288
- pyTEMlib/version.py +2 -2
- pyTEMlib/xrpa_x_sections.py +173 -97
- {pyTEMlib-0.2023.8.0.dist-info → pyTEMlib-0.2024.2.0.dist-info}/LICENSE +1 -1
- {pyTEMlib-0.2023.8.0.dist-info → pyTEMlib-0.2024.2.0.dist-info}/METADATA +2 -2
- pyTEMlib-0.2024.2.0.dist-info/RECORD +35 -0
- {pyTEMlib-0.2023.8.0.dist-info → pyTEMlib-0.2024.2.0.dist-info}/WHEEL +1 -1
- pyTEMlib/eels_dlg.py +0 -252
- pyTEMlib/info_dialog.py +0 -665
- pyTEMlib/info_dlg.py +0 -239
- pyTEMlib/interactive_eels.py +0 -35
- pyTEMlib/viz.py +0 -481
- pyTEMlib-0.2023.8.0.dist-info/RECORD +0 -40
- {pyTEMlib-0.2023.8.0.dist-info → pyTEMlib-0.2024.2.0.dist-info}/entry_points.txt +0 -0
- {pyTEMlib-0.2023.8.0.dist-info → pyTEMlib-0.2024.2.0.dist-info}/top_level.txt +0 -0
pyTEMlib/file_tools.py
CHANGED
@@ -47,8 +47,12 @@ Dimension = sidpy.Dimension
 get_slope = sidpy.base.num_utils.get_slope
 __version__ = '2022.3.3'
 
+from traitlets import Unicode, Bool, validate, TraitError
+import ipywidgets
 
-class FileWidget(object):
+
+@ipywidgets.register
+class FileWidget(ipywidgets.DOMWidget):
     """Widget to select directories or widgets from a list
 
     Works in google colab.
@@ -95,7 +99,7 @@ class FileWidget(object):
         self.dir_list = ['.']
         self.extensions = extension
         self.file_name = ''
-        self.datasets ={}
+        self.datasets = {}
         self.dataset = None
 
         self.select_files = widgets.Select(
@@ -108,28 +112,29 @@ class FileWidget(object):
         )
 
         select_button = widgets.Button(description='Select Main',
-
-
+                                       layout=widgets.Layout(width='auto', grid_area='header'),
+                                       style=widgets.ButtonStyle(button_color='lightblue'))
 
         add_button = widgets.Button(description='Add',
-
-
+                                    layout=widgets.Layout(width='auto', grid_area='header'),
+                                    style=widgets.ButtonStyle(button_color='lightblue'))
 
         self.path_choice = widgets.Dropdown(options=['None'],
-
-
-
-
-
+                                            value='None',
+                                            description='directory:',
+                                            disabled=False,
+                                            button_style='',
+                                            layout=widgets.Layout(width='90%'))
         self.dataset_list = ['None']
         self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,
-
-
-
-
+                                                value=self.dataset_list[0],
+                                                description='loaded datasets:',
+                                                disabled=False,
+                                                button_style='')
 
         self.set_options()
-        ui = widgets.VBox([self.path_choice, self.select_files, widgets.HBox([select_button, add_button,
+        ui = widgets.VBox([self.path_choice, self.select_files, widgets.HBox([select_button, add_button,
+                                                                              self.loaded_datasets])])
         display(ui)
 
         self.select_files.observe(self.get_file_name, names='value')
@@ -137,18 +142,21 @@ class FileWidget(object):
 
         select_button.on_click(self.select_main)
         add_button.on_click(self.add_dataset)
-        self.loaded_datasets.observe(self.
+        self.loaded_datasets.observe(self.select_dataset)
 
     def select_main(self, value=0):
         self.datasets = {}
-        self.loaded_datasets.value = self.dataset_list[0]
+        #self.loaded_datasets.value = self.dataset_list[0]
+        self.dataset_list = []
         self.datasets = open_file(self.file_name)
         self.dataset_list = []
         for key in self.datasets.keys():
            self.dataset_list.append(f'{key}: {self.datasets[key].title}')
         self.loaded_datasets.options = self.dataset_list
         self.loaded_datasets.value = self.dataset_list[0]
+        self.debug = 5
         self.dataset = self.datasets[list(self.datasets.keys())[0]]
+        self.debug = 6
         self.selected_dataset = self.dataset
 
     def add_dataset(self, value=0):
@@ -168,11 +176,12 @@ class FileWidget(object):
         self.select_files.index = 0
         self.set_options()
 
-    def
+    def select_dataset(self, value=0):
 
         key = self.loaded_datasets.value.split(':')[0]
         if key != 'None':
             self.selected_dataset = self.datasets[key]
+            self.selected_key = key
 
     def set_options(self):
         self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
@@ -368,6 +377,9 @@ def get_last_path():
 
     if len(path) < 2:
         path = '.'
+    else:
+        if not os.path.exists(path):
+            path = '.'
     return path
 
 
@@ -451,6 +463,7 @@ def open_file_dialog_qt(file_types=None):  # , multiple_files=False):
     save_path(filename)
     return filename
 
+
 def save_file_dialog_qt(file_types=None):  # , multiple_files=False):
     """Opens a File dialog which is used in open_file() function
 
@@ -488,10 +501,6 @@ def save_file_dialog_qt(file_types=None):  # , multiple_files=False):
        file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
                     'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
 
-
-    # file_types = [("TEM files",["*.dm*","*.hf*","*.ndata" ]),("pyNSID files","*.hf5"),("DM files","*.dm*"),
-    #               ("Nion files",["*.h5","*.ndata"]),("all files","*.*")]
-
     # Determine last path used
     path = get_last_path()
 
@@ -639,28 +648,15 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False):  # save_file
         file = datasets[0].h5_dataset.file
         master_group = datasets[0].h5_dataset.parent.parent.parent
         for key in master_group.keys():
-
             if key not in dataset_dict:
                 dataset_dict[key] = h5_group_to_dict(master_group[key])
-            print()
         if not write_hdf_file:
             file.close()
-        # datasets[0].h5_dataset = None
         return dataset_dict
-
-        """
-        should go to no dataset found
-        if 'Raw_Data' in h5_group:
-            dataset = read_old_h5group(h5_group)
-            dataset.h5_dataset = h5_group['Raw_Data']
-        """
-
-
-
+    elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5']:
         # tags = open_file(filename)
         if extension in ['.dm3', '.dm4']:
             reader = SciFiReaders.DMReader(filename)
-
         elif extension in ['.emi']:
             try:
                 import hyperspy.api as hs
@@ -673,12 +669,12 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False):  # save_file
                     dset = SciFiReaders.convert_hyperspy(datum)
                     if datum.data.ndim == 1:
                         dset.title = dset.title + f'_{spectrum_number}_Spectrum'
-                        spectrum_number +=1
+                        spectrum_number += 1
                     elif datum.data.ndim == 3:
-                        dset.title = dset.title +'_SI'
+                        dset.title = dset.title + '_SI'
                         dset = dset.T
                     dset.title = dset.title[11:]
-                    dataset_dict[f'Channel_{index:03d}']=dset
+                    dataset_dict[f'Channel_{index:03d}'] = dset
                 return dataset_dict
             except ImportError:
                 print('This file type needs hyperspy to be installed to be able to be read')
@@ -686,6 +682,10 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False):  # save_file
         elif extension == '.emd':
             reader = SciFiReaders.EMDReader(filename)
 
+        elif 'edax' in extension.lower():
+            if 'h5' in extension:
+                reader = SciFiReaders.EDAXReader(filename)
+
         elif extension in ['.ndata', '.h5']:
             reader = SciFiReaders.NionReader(filename)
 
@@ -699,18 +699,22 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False):  # save_file
 
     if extension in ['.dm3', '.dm4']:
         title = (basename.strip().replace('-', '_')).split('/')[-1]
-        if not isinstance(dset,
+        if not isinstance(dset, dict):
            print('Please use new SciFiReaders Package for full functionality')
+        if isinstance(dset, sidpy.Dataset):
            dset = [dset]
-
-
-
+
+    if isinstance(dset, dict):
+        dataset_dict = dset
 
-
+    elif isinstance(dset, list):
        if len(dset) < 1:
            print('no dataset found in file')
            return {}
        else:
+            if 'PageSetup' in dset[0].original_metadata:
+                del dset[0].original_metadata['PageSetup']
+            dset[0].original_metadata['original_title'] = title
            dataset_dict = {}
            for index, dataset in enumerate(dset):
                if extension == '.emi':
@@ -963,7 +967,7 @@ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_datase
        actual last used name of dictionary key
    """
 
-    datasets2 =
+    datasets2 = open_file(filename=filename)
    first_dataset = datasets2[list(datasets2)[0]]
    if isinstance(first_dataset, sidpy.Dataset):
 
@@ -973,10 +977,10 @@ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_datase
            if int(key[-3:]) >= index:
                index = int(key[-3:])+1
        if single_dataset:
-            datasets[key_name+f'_{index:03}'] =
+            datasets[key_name+f'_{index:03}'] = first_dataset
        else:
            for dataset in datasets2.values():
-                datasets[key_name+f'_{index:03}'] =
+                datasets[key_name+f'_{index:03}'] = dataset
            index += 1
        index -= 1
    else:
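For orientation, here is a minimal sketch of how the reworked loader in file_tools is typically driven. The file name is hypothetical; per the diff above, open_file() returns a plain dict of sidpy.Dataset objects (keys such as 'Channel_000'), and '.edaxh5' is now among the accepted extensions.

import pyTEMlib.file_tools as ft

# hypothetical file name; .dm3/.dm4/.emd/.emi/.ndata/.h5/.edaxh5 follow the same code path
datasets = ft.open_file('STEM_image_01.dm4')

for key, dataset in datasets.items():
    # every value is a sidpy.Dataset; the key encodes the channel
    print(key, dataset.title)

main_dataset = datasets[list(datasets)[0]]    # first channel as the main dataset

Inside notebooks, this same dictionary is what the new ipywidgets-based FileWidget fills through its 'Select Main' and 'Add' buttons.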
pyTEMlib/graph_tools.py
CHANGED
@@ -797,15 +797,14 @@ def breadth_first_search(graph, initial, projected_crystal):
 
     # get lattice vectors to hopp along through graph
     projected_unit_cell = projected_crystal.cell[:2, :2]
-    a_lattice_vector = projected_unit_cell[0]
-    b_lattice_vector = projected_unit_cell[1]
-    print(a_lattice_vector, b_lattice_vector)
+    a_lattice_vector = projected_unit_cell[0]
+    b_lattice_vector = projected_unit_cell[1]
     main = np.array([a_lattice_vector, -a_lattice_vector, b_lattice_vector, -b_lattice_vector])  # vectors of unit cell
     near = np.append(main, projection_tags['near_base'], axis=0)  # all nearest atoms
     # get k next nearest neighbours for each node
     neighbour_tree = scipy.spatial.cKDTree(graph)
     distances, indices = neighbour_tree.query(graph,  # let's get all neighbours
-                                              k=
+                                              k=50)  # projection_tags['number_of_nearest_neighbours']*2 + 1)
     # print(projection_tags['number_of_nearest_neighbours'] * 2 + 1)
     visited = []  # the atoms we visited
     ideal = []  # atoms at ideal lattice
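The only functional change in breadth_first_search() is that the neighbour query now asks for a fixed k=50 neighbours per node instead of a value derived from the projection tags. As a reminder of what that SciPy call yields, a small sketch on synthetic 2D positions (values are illustrative only):

import numpy as np
import scipy.spatial

graph = np.random.default_rng(42).random((200, 2)) * 10      # stand-in for the atom positions
neighbour_tree = scipy.spatial.cKDTree(graph)

# both arrays have shape (200, 50); column 0 is each point itself at distance 0
distances, indices = neighbour_tree.query(graph, k=50)
print(distances.shape, indices[0, :5])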
pyTEMlib/image_tools.py
CHANGED
@@ -6,7 +6,7 @@ MIT license except where stated differently
 """
 
 import numpy as np
-
+import matplotlib
 import matplotlib as mpl
 import matplotlib.pylab as plt
 import matplotlib.widgets as mwidgets
@@ -23,6 +23,7 @@ from tqdm.auto import trange, tqdm
 from itertools import product
 
 from scipy import fftpack
+import scipy
 # from scipy import signal
 from scipy.interpolate import interp1d  # , interp2d
 import scipy.optimize as optimization
@@ -45,6 +46,10 @@ from skimage.feature import blob_log  # blob_dog, blob_doh
 
 from sklearn.feature_extraction import image
 from sklearn.utils.extmath import randomized_svd
+from sklearn.cluster import DBSCAN
+
+from collections import Counter
+
 
 _SimpleITK_present = True
 try:
@@ -77,7 +82,7 @@ def get_wavelength(e0):
     return const.h/np.sqrt(2*const.m_e*eV*(1+eV/(2*const.m_e*const.c**2)))*10**9
 
 
-def fourier_transform(dset):
+def fourier_transform(dset: sidpy.Dataset) -> sidpy.Dataset:
     """
     Reads information into dictionary 'tags', performs 'FFT', and provides a smoothed FT and reciprocal
     and intensity limits for visualization.
@@ -111,7 +116,7 @@ def fourier_transform(dset):
     if len(image_dim) != 2:
         raise ValueError('need at least two SPATIAL dimension for an image stack')
 
-    for i in range(dset.
+    for i in range(dset.ndim):
         if i in image_dim:
             selection.append(slice(None))
     if len(stack_dim) == 0:
@@ -202,7 +207,7 @@ def power_spectrum(dset, smoothing=3):
     return power_spec
 
 
-def diffractogram_spots(dset, spot_threshold):
+def diffractogram_spots(dset, spot_threshold, return_center=True, eps=0.1):
     """Find spots in diffractogram and sort them by distance from center
 
     Uses blob_log from scipy.spatial
@@ -213,6 +218,10 @@ def diffractogram_spots(dset, spot_threshold):
         diffractogram
     spot_threshold: float
         threshold for blob finder
+    return_center: bool, optional
+        return center of image if true
+    eps: float, optional
+        threshold for blob finder
 
     Returns
     -------
@@ -242,7 +251,28 @@
     spots = spots_random[spots_index]
     # third row is angles
     spots[:, 2] = np.arctan2(spots[:, 0], spots[:, 1])
-
+
+    center = [0, 0]
+
+    if return_center:
+        points = spots[:, 0:2]
+
+        # Calculate the midpoints between all points
+        reshaped_points = points[:, np.newaxis, :]
+        midpoints = (reshaped_points + reshaped_points.transpose(1, 0, 2)) / 2.0
+        midpoints = midpoints.reshape(-1, 2)
+
+        # Find the most dense cluster of midpoints
+        dbscan = DBSCAN(eps=eps, min_samples=2)
+        labels = dbscan.fit_predict(midpoints)
+        cluster_counter = Counter(labels)
+        largest_cluster_label = max(cluster_counter, key=cluster_counter.get)
+        largest_cluster_points = midpoints[labels == largest_cluster_label]
+
+        # Average of these midpoints must be the center
+        center = np.mean(largest_cluster_points, axis=0)
+
+    return spots, center
 
 
 def adaptive_fourier_filter(dset, spots, low_pass=3, reflection_radius=0.3):
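The new centre estimate in diffractogram_spots() exploits the centro-symmetry of a diffraction pattern: midpoints of spot pairs related by inversion all fall on the pattern centre, so the densest cluster of pairwise midpoints marks it. A standalone sketch of that idea on synthetic spots (illustrative values; DBSCAN's noise label is excluded here, which the library code does not do):

import numpy as np
from collections import Counter
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
true_center = np.array([2.0, -1.0])
offsets = rng.normal(size=(12, 2)) * 3.0
# centro-symmetric spots around the true centre, with a little positional noise
points = np.concatenate([true_center + offsets, true_center - offsets])
points += rng.normal(scale=0.01, size=points.shape)

# midpoints of all point pairs; inversion-related pairs land on the centre
reshaped = points[:, np.newaxis, :]
midpoints = ((reshaped + reshaped.transpose(1, 0, 2)) / 2.0).reshape(-1, 2)

labels = DBSCAN(eps=0.1, min_samples=2).fit_predict(midpoints)
counts = Counter(label for label in labels if label != -1)    # ignore noise
center = midpoints[labels == max(counts, key=counts.get)].mean(axis=0)
print(center)    # should land close to [2., -1.]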
@@ -344,16 +374,17 @@ def complete_registration(main_dataset, storage_channel=None):
     print('Rigid_Registration')
 
     rigid_registered_dataset = rigid_registration(main_dataset)
-    if storage_channel is None:
-        storage_channel = main_dataset.h5_dataset.parent.parent
 
-
+    if storage_channel is not None:
+        registration_channel = ft.log_results(storage_channel, rigid_registered_dataset)
 
     print('Non-Rigid_Registration')
 
     non_rigid_registered = demon_registration(rigid_registered_dataset)
-
+    if storage_channel is not None:
+        registration_channel = ft.log_results(storage_channel, non_rigid_registered)
 
+    non_rigid_registered.h5_dataset = registration_channel
     return non_rigid_registered, rigid_registered_dataset
 
 
@@ -413,8 +444,6 @@ def demon_registration(dataset, verbose=False):
     resampler.SetInterpolator(sitk.sitkBSpline)
     resampler.SetDefaultPixelValue(0)
 
-    done = 0
-
     for i in trange(nimages):
 
         moving = sitk.GetImageFromArray(dataset[i])
@@ -492,7 +521,7 @@ def rigid_registration(dataset):
           ' pixels in y-direction')
 
     fixed = dataset[tuple(selection)].squeeze().compute()
-    fft_fixed =
+    fft_fixed = np.fft.fft2(fixed)
 
     relative_drift = [[0., 0.]]
 
@@ -502,12 +531,10 @@ def rigid_registration(dataset):
         fft_moving = np.fft.fft2(moving)
         image_product = fft_fixed * fft_moving.conj()
         cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
-        shift =np.array(ndimage.maximum_position(cc_image.real))-cc_image.shape[0]/2
+        shift = np.array(ndimage.maximum_position(cc_image.real))-cc_image.shape[0]/2
         fft_fixed = fft_moving
         relative_drift.append(shift)
     rig_reg, drift = rig_reg_drift(dataset, relative_drift)
-
-
     crop_reg, input_crop = crop_image_stack(rig_reg, drift)
 
     rigid_registered = dataset.like_data(crop_reg)
@@ -515,18 +542,12 @@ def rigid_registration(dataset):
     rigid_registered.source = dataset.title
     rigid_registered.metadata = {'analysis': 'rigid sub-pixel registration', 'drift': drift,
                                  'input_crop': input_crop, 'input_shape': dataset.shape[1:]}
-    if hasattr(rigid_registered, 'z'):
-        del rigid_registered.z
-    if hasattr(rigid_registered, 'x'):
-        del rigid_registered.x
-    if hasattr(rigid_registered, 'y'):
-        del rigid_registered.y
-    # rigid_registered._axes = {}
     rigid_registered.set_dimension(0, dataset._axes[frame_dim[0]])
     rigid_registered.set_dimension(1, dataset._axes[spatial_dim[0]][input_crop[0]:input_crop[1]])
     rigid_registered.set_dimension(2, dataset._axes[spatial_dim[1]][input_crop[2]:input_crop[3]])
     return rigid_registered.rechunk({0: 'auto', 1: -1, 2: -1})
 
+
 def rig_reg_drift(dset, rel_drift):
     """ Shifting images on top of each other
 
@@ -579,7 +600,8 @@ def rig_reg_drift(dset, rel_drift):
     for i in range(rig_reg.shape[0]):
         selection[frame_dim[0]] = slice(i, i+1)
         # Now we shift
-        rig_reg[i, :, :] = ndimage.shift(dset[tuple(selection)].squeeze().compute(),
+        rig_reg[i, :, :] = ndimage.shift(dset[tuple(selection)].squeeze().compute(),
+                                         [drift[i, 0], drift[i, 1]], order=3)
     return rig_reg, drift
 
 
@@ -605,6 +627,7 @@ def crop_image_stack(rig_reg, drift):
 
     return rig_reg[:, xpmin:xpmax, ypmin:ypmax], [xpmin, xpmax, ypmin, ypmax]
 
+
 class ImageWithLineProfile:
     """Image with line profile"""
 
@@ -673,6 +696,112 @@ class ImageWithLineProfile:
         self.ax[1].draw()
 
 
+class LineSelector(matplotlib.widgets.PolygonSelector):
+    def __init__(self, ax, onselect, line_width=1, **kwargs):
+        super().__init__(ax, onselect, **kwargs)
+        bounds = ax.viewLim.get_points()
+        np.max(bounds[0])
+        self.line_verts = np.array([[np.max(bounds[1])/2, np.max(bounds[0])/5], [np.max(bounds[1])/2,
+                                     np.max(bounds[0])/5+1],
+                                    [np.max(bounds[1])/5, np.max(bounds[0])/2], [np.max(bounds[1])/5,
+                                     np.max(bounds[0])/2]])
+        self.verts = self.line_verts
+        self.line_width = line_width
+
+    def set_linewidth(self, line_width=None):
+        if line_width is not None:
+            self.line_width = line_width
+
+        m = -(self.line_verts[0, 1]-self.line_verts[3, 1])/(self.line_verts[0, 0]-self.line_verts[3, 0])
+        c = 1/np.sqrt(1+m**2)
+        s = c*m
+        self.line_verts[1] = [self.line_verts[0, 0]+self.line_width*s, self.line_verts[0, 1]+self.line_width*c]
+        self.line_verts[2] = [self.line_verts[3, 0]+self.line_width*s, self.line_verts[3, 1]+self.line_width*c]
+
+        self.verts = self.line_verts.copy()
+
+    def onmove(self, event):
+        super().onmove(event)
+        if np.max(np.linalg.norm(self.line_verts-self.verts, axis=1)) > 1:
+            self.moved_point = np.argmax(np.linalg.norm(self.line_verts-self.verts, axis=1))
+
+            self.new_point = self.verts[self.moved_point]
+            moved_point = int(np.floor(self.moved_point/2)*3)
+            self.moved_point = moved_point
+            self.line_verts[moved_point] = self.new_point
+            self.set_linewidth()
+
+def get_profile(dataset, line):
+    xv, yv = get_line_selection_points(line)
+
+
+    if dataset.data_type.name == 'IMAGE':
+        dataset.get_image_dims()
+        xv /= (dataset.x[1] - dataset.x[0])
+        yv /= (dataset.y[1] - dataset.y[0])
+        profile = scipy.ndimage.map_coordinates(np.array(dataset), [xv,yv])
+
+        profile_dataset = sidpy.Dataset.from_array(profile.sum(axis=0))
+        profile_dataset.data_type='spectrum'
+        profile_dataset.units = dataset.units
+        profile_dataset.quantity = dataset.quantity
+        profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1], profile_dataset.shape[0]),
+                                                         name='x', units=dataset.x.units, quantity=dataset.x.quantity,
+                                                         dimension_type='spatial'))
+
+        profile_dataset
+
+    if dataset.data_type.name == 'SPECTRAL_IMAGE':
+        spectral_axis = dataset.get_spectral_dims(return_axis=True)[0]
+        profile = np.zeros([xv.shape[1], 2, len(spectral_axis)])
+        data =np.array(dataset)
+
+        for index_x in range(xv.shape[1]):
+            for index_y in range(xv.shape[0]):
+                x = xv[index_y, index_x]
+                y = yv[index_y, index_x]
+                profile[index_x, 0] +=data[int(x),int(y)]
+        profile_dataset = sidpy.Dataset.from_array(profile)
+        profile_dataset.data_type='spectral_image'
+        profile_dataset.units = dataset.units
+        profile_dataset.quantity = dataset.quantity
+        profile_dataset.set_dimension(0, sidpy.Dimension(np.linspace(xv[0,0], xv[-1,-1], profile_dataset.shape[0]),
+                                                         name='x', units=dataset.x.units, quantity=dataset.x.quantity,
+                                                         dimension_type='spatial'))
+        profile_dataset.set_dimension(1, sidpy.Dimension([0, 1],
+                                                         name='y', units=dataset.x.units, quantity=dataset.x.quantity,
+                                                         dimension_type='spatial'))
+
+        profile_dataset.set_dimension(2, spectral_axis)
+    return profile_dataset
+
+
+def get_line_selection_points(line):
+
+    start_point = line.line_verts[3]
+    right_point = line.line_verts[0]
+    low_point = line.line_verts[2]
+
+    if start_point[0] > right_point[0]:
+        start_point = line.line_verts[0]
+        right_point = line.line_verts[3]
+        low_point = line.line_verts[1]
+    m = (right_point[1] - start_point[1]) / (right_point[0] - start_point[0])
+    length_x = int(abs(start_point[0]-right_point[0]))
+    length_v = int(np.linalg.norm(start_point-right_point))
+
+    linewidth = int(abs(start_point[1]-low_point[1]))
+    x = np.linspace(0,length_x, length_v)
+    y = np.linspace(0,linewidth, line.line_width)
+    xv, yv = np.meshgrid(x, y)
+
+    yy = yv +x*m+start_point[1]
+    xx = (xv.T -y*m ).T + start_point[0]
+
+    return xx, yy
+
+
+
 def histogram_plot(image_tags):
     """interactive histogram"""
     nbins = 75
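Most of the work in the new get_profile() is done by scipy.ndimage.map_coordinates, which samples the image on a dense grid of points spanning the selected line before the samples are summed across the line width. A minimal, self-contained sketch of that core step (synthetic image and hypothetical end points; the sidpy bookkeeping of the real function is omitted):

import numpy as np
import scipy.ndimage

# synthetic image with a bright diagonal ridge
yy, xx = np.mgrid[0:128, 0:128]
image = np.exp(-((xx - yy) ** 2) / 50.0)

# hypothetical line from (x0, y0) to (x1, y1) in pixel coordinates
start, stop = np.array([10.0, 60.0]), np.array([110.0, 60.0])
n_samples = int(np.linalg.norm(stop - start))
rows = np.linspace(start[1], stop[1], n_samples)
cols = np.linspace(start[0], stop[0], n_samples)

# cubic interpolation along the line (order=3 is also the map_coordinates default)
profile = scipy.ndimage.map_coordinates(image, [rows, cols], order=3)
print(profile.argmax())    # peaks roughly where the line crosses the ridge, around sample 50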
@@ -687,10 +816,9 @@ def histogram_plot(image_tags):
     vmax = image_tags['maximum_intensity']
     if 'color_map' not in image_tags:
         image_tags['color_map'] = color_map_list[0]
-    cmap = plt.cm.get_cmap(image_tags['color_map'])
 
+    cmap = plt.cm.get_cmap(image_tags['color_map'])
     colors = cmap(np.linspace(0., 1., nbins))
-
     norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
     hist, bin_edges = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
 
@@ -699,9 +827,7 @@ def histogram_plot(image_tags):
     def onselect(vmin, vmax):
         ax1.clear()
         cmap = plt.cm.get_cmap(image_tags['color_map'])
-
         colors = cmap(np.linspace(0., 1., nbins))
-
         norm2 = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
         hist2, bin_edges2 = np.histogram(data, np.linspace(vmin, vmax, nbins), density=True)
 
@@ -737,7 +863,7 @@ def histogram_plot(image_tags):
     else:
         vmax = data.max()
         vmin = data.min()
-
+    onselect(vmin, vmax)
 
     fig2 = plt.figure()
 
@@ -884,24 +1010,30 @@ def cartesian2polar(x, y, grid, r, t, order=3):
     return ndimage.map_coordinates(grid, np.array([new_ix, new_iy]), order=order).reshape(new_x.shape)
 
 
-def warp(diff
-    """
+def warp(diff):
+    """Takes a centered diffraction pattern (as a sidpy dataset)and warps it to a polar grid"""
+    """Centered diff can be produced with it.diffractogram_spots(return_center = True)"""
 
     # Define original polar grid
-    nx =
-    ny =
+    nx = np.shape(diff)[0]
+    ny = np.shape(diff)[1]
 
-
-
-
+    # Define center pixel
+    pix2nm = np.gradient(diff.u.values)[0]
+    center_pixel = [abs(min(diff.u.values)), abs(min(diff.v.values))]//pix2nm
+
+    x = np.linspace(1, nx, nx, endpoint=True)-center_pixel[0]
+    y = np.linspace(1, ny, ny, endpoint=True)-center_pixel[1]
+    z = diff
 
     # Define new polar grid
-    nr = min([
+    nr = int(min([center_pixel[0], center_pixel[1], diff.shape[0]-center_pixel[0], diff.shape[1]-center_pixel[1]])-1)
     nt = 360*3
 
     r = np.linspace(1, nr, nr)
     t = np.linspace(0., np.pi, nt, endpoint=False)
-
+
+    return cartesian2polar(x, y, z, r, t, order=3)
 
 
 def calculate_ctf(wavelength, cs, defocus, k):
@@ -957,8 +1089,8 @@ def get_rotation(experiment_spots, crystal_spots):
 
     # get crystal spots of same length and sort them by angle as well
     r_crystal, phi_crystal, crystal_indices = xy2polar(crystal_spots)
-    angle_index = np.argmin(np.abs(r_experiment-r_crystal[1])
-    rotation_angle = phi_experiment[angle_index]%(2*np.pi) - phi_crystal[1]
+    angle_index = np.argmin(np.abs(r_experiment-r_crystal[1]))
+    rotation_angle = phi_experiment[angle_index] % (2*np.pi) - phi_crystal[1]
     print(phi_experiment[angle_index])
     st = np.sin(rotation_angle)
     ct = np.cos(rotation_angle)
@@ -967,7 +1099,6 @@ def get_rotation(experiment_spots, crystal_spots):
     return rotation_matrix, rotation_angle
 
 
-
 def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
     """depreciated get change of scale from comparison of spots to Bragg angles """
     gx = fft_tags['spatial_scale_x']
@@ -989,7 +1120,6 @@ def calibrate_image_scale(fft_tags, spots_reference, spots_experiment):
     return dg
 
 
-
 def align_crystal_reflections(spots, crystals):
     """ Depreciated - use diffraction spots"""
 
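The new warp() itself only determines the centre pixel and the largest usable radius and then hands off to the existing cartesian2polar() resampler. A rough standalone sketch of that resampling step on a synthetic ring pattern (the sidpy-specific u/v axes and pixel-size handling of the real function are not reproduced, and theta is sampled over the full circle here rather than over [0, pi) as in the library):

import numpy as np
from scipy import ndimage

# synthetic centred "diffraction pattern": a bright ring of radius 40 pixels
ny, nx = 256, 256
center = np.array([128.0, 128.0])
yy, xx = np.mgrid[0:ny, 0:nx]
pattern = np.exp(-(np.hypot(yy - center[0], xx - center[1]) - 40.0) ** 2 / 10.0)

# target polar grid: nr radii by nt angles
nr, nt = 120, 360
rr, tt = np.meshgrid(np.linspace(1, nr, nr), np.linspace(0.0, 2 * np.pi, nt, endpoint=False), indexing='ij')

# map polar (r, theta) back to pixel coordinates and interpolate
rows = center[0] + rr * np.sin(tt)
cols = center[1] + rr * np.cos(tt)
polar = ndimage.map_coordinates(pattern, [rows.ravel(), cols.ravel()], order=3).reshape(rr.shape)

print(polar.shape)                      # (120, 360)
print(np.argmax(polar.mean(axis=1)))    # the ring shows up as a ridge near r = 40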