pyTEMlib 0.2024.9.0__py3-none-any.whl → 0.2025.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyTEMlib/animation.py +1 -1
- pyTEMlib/atom_tools.py +2 -1
- pyTEMlib/core_loss_widget.py +337 -272
- pyTEMlib/eels_dialog.py +15 -10
- pyTEMlib/eels_tools.py +452 -125
- pyTEMlib/file_tools.py +319 -30
- pyTEMlib/image_tools.py +91 -15
- pyTEMlib/info_widget.py +211 -58
- pyTEMlib/info_widget3.py +1120 -0
- pyTEMlib/low_loss_widget.py +344 -41
- pyTEMlib/peak_dialog.py +141 -59
- pyTEMlib/probe_tools.py +65 -8
- pyTEMlib/version.py +2 -2
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/METADATA +15 -5
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/RECORD +19 -18
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/WHEEL +1 -1
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/LICENSE +0 -0
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/entry_points.txt +0 -0
- {pyTEMlib-0.2024.9.0.dist-info → pytemlib-0.2025.2.2.dist-info}/top_level.txt +0 -0
pyTEMlib/file_tools.py
CHANGED
@@ -44,13 +44,202 @@ except ModuleNotFoundError:
 
 Dimension = sidpy.Dimension
 
-
-
+# Austin commented the line below - it is not used anywhere in the code, and it gives import errors 9-14-2024
+# get_slope = sidpy.base.num_utils.get_slopes
+__version__ = '2024.9.14'
 
 from traitlets import Unicode, Bool, validate, TraitError
 import ipywidgets
 
 
+@ipywidgets.register
+class FileWidget2(ipywidgets.DOMWidget):
+    """Widget to select directories or widgets from a list
+
+    Works in google colab.
+    The widget converts the name of the nion file to the one in Nion's swift software,
+    because it is otherwise incomprehensible
+
+    Attributes
+    ----------
+    dir_name: str
+        name of starting directory
+    extension: list of str
+        extensions of files to be listed in widget
+
+    Methods
+    -------
+    get_directory
+    set_options
+    get_file_name
+
+    Example
+    -------
+    >>from google.colab import drive
+    >>drive.mount("/content/drive")
+    >>file_list = pyTEMlib.file_tools.FileWidget()
+    next code cell:
+    >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)
+
+    """
+
+    def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
+        self.save_path = False
+        self.dir_dictionary = {}
+        self.dir_list = ['.', '..']
+        self.display_list = ['.', '..']
+        self.sum_frames = sum_frames
+
+        self.dir_name = '.'
+        if dir_name is None:
+            self.dir_name = get_last_path()
+            self.save_path = True
+        elif os.path.isdir(dir_name):
+            self.dir_name = dir_name
+
+        self.get_directory(self.dir_name)
+        self.dir_list = ['.']
+        self.extensions = extension
+        self.file_name = ''
+        self.datasets = {}
+        self.dataset = None
+
+        self.select_files = widgets.Select(
+            options=self.dir_list,
+            value=self.dir_list[0],
+            description='Select file:',
+            disabled=False,
+            rows=10,
+            layout=widgets.Layout(width='70%')
+        )
+        self.path_choice = widgets.Dropdown(options=['None'],
+                                            value='None',
+                                            description='directory:',
+                                            disabled=False,
+                                            button_style='',
+                                            layout=widgets.Layout(width='90%'))
+
+
+
+        self.set_options()
+        ui = widgets.VBox([self.path_choice, self.select_files])
+        display(ui)
+
+        self.select_files.observe(self.get_file_name, names='value')
+        self.path_choice.observe(self.set_dir, names='value')
+
+
+
+    def get_directory(self, directory=None):
+        self.dir_name = directory
+        self.dir_dictionary = {}
+        self.dir_list = []
+        self.dir_list = ['.', '..'] + os.listdir(directory)
+
+    def set_dir(self, value=0):
+        self.dir_name = self.path_choice.value
+        self.select_files.index = 0
+        self.set_options()
+
+
+    def set_options(self):
+        self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
+        dir_list = os.listdir(self.dir_name)
+        file_dict = update_directory_list(self.dir_name)
+
+        sort = np.argsort(file_dict['directory_list'])
+        self.dir_list = ['.', '..']
+        self.display_list = ['.', '..']
+        for j in sort:
+            self.display_list.append(f" * {file_dict['directory_list'][j]}")
+            self.dir_list.append(file_dict['directory_list'][j])
+
+        sort = np.argsort(file_dict['display_file_list'])
+
+        for i, j in enumerate(sort):
+            if '--' in dir_list[j]:
+                self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
+            else:
+                self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
+            self.dir_list.append(file_dict['file_list'][j])
+
+        self.dir_label = os.path.split(self.dir_name)[-1] + ':'
+        self.select_files.options = self.display_list
+
+        path = self.dir_name
+        old_path = ' '
+        path_list = []
+        while path != old_path:
+            path_list.append(path)
+            old_path = path
+            path = os.path.split(path)[0]
+        self.path_choice.options = path_list
+        self.path_choice.value = path_list[0]
+
+    def get_file_name(self, b):
+
+        if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
+            self.set_options()
+
+        elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
+            self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])
+
+class FileWidget3(FileWidget2):
+    def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
+        if dir_name is None:
+            dir_name = get_last_path()
+            self.save_path = True
+        super().__init__(dir_name=dir_name, extension=extension, sum_frames=sum_frames)
+
+        select_button = widgets.Button(description='Select Main',
+                                       layout=widgets.Layout(width='auto', grid_area='header'),
+                                       style=widgets.ButtonStyle(button_color='lightblue'))
+
+        add_button = widgets.Button(description='Add',
+                                    layout=widgets.Layout(width='auto', grid_area='header'),
+                                    style=widgets.ButtonStyle(button_color='lightblue'))
+
+        self.dataset_list = ['None']
+        self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,
+                                                value=self.dataset_list[0],
+                                                description='loaded datasets:',
+                                                disabled=False,
+                                                button_style='')
+
+        ui = widgets.HBox([select_button, add_button, self.loaded_datasets])
+        display(ui)
+
+
+        select_button.on_click(self.select_main)
+        add_button.on_click(self.add_dataset)
+        self.loaded_datasets.observe(self.select_dataset)
+
+
+    def select_dataset(self, value=0):
+        key = self.loaded_datasets.value.split(':')[0]
+        if key != 'None':
+            self.selected_dataset = self.datasets[key]
+            self.selected_key = key
+
+    def select_main(self, value=0):
+        self.datasets = {}
+        self.dataset_list = []
+        self.datasets = open_file(self.file_name, sum_frames=self.sum_frames)
+        self.dataset_list = []
+        for key in self.datasets.keys():
+            self.dataset_list.append(f'{key}: {self.datasets[key].title}')
+        self.loaded_datasets.options = self.dataset_list
+        self.loaded_datasets.value = self.dataset_list[0]
+        self.dataset = self.datasets[list(self.datasets.keys())[0]]
+        self.selected_dataset = self.dataset
+
+    def add_dataset(self, value=0):
+        key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')
+        self.dataset_list.append(f'{key}: {self.datasets[key].title}')
+        self.loaded_datasets.options = self.dataset_list
+        self.loaded_datasets.value = self.dataset_list[-1]
+
+
 @ipywidgets.register
 class FileWidget(ipywidgets.DOMWidget):
     """Widget to select directories or widgets from a list
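The new FileWidget2/FileWidget3 widgets keep the Colab-centred workflow described in the docstring above. A minimal usage sketch along those lines (the Google Drive mount only applies on Colab; the opened file is whatever was picked in the widget):

    # usage sketch following the FileWidget2 docstring; the drive mount is Colab-only
    from google.colab import drive
    import pyTEMlib.file_tools as ft

    drive.mount("/content/drive")                  # expose Google Drive to the widget
    file_list = ft.FileWidget()                    # interactive directory/file selector

    # in the next notebook cell, once a file has been selected:
    datasets = ft.open_file(file_list.file_name)   # dictionary of sidpy.Dataset objects

FileWidget3 adds the 'Select Main' and 'Add' buttons on top of this and fills self.datasets and self.selected_dataset itself, so the explicit open_file call is not needed there.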
@@ -252,6 +441,8 @@ class ChooseDataset(object):
         else:
             self.reader = None
         self.get_dataset_list()
+        if len(self.dataset_list) < 1:
+            self.dataset_list = ['None']
         self.select_image = widgets.Dropdown(options=self.dataset_list,
                                              value=self.dataset_list[0],
                                              description='select dataset:',
@@ -266,6 +457,7 @@ class ChooseDataset(object):
 
     def get_dataset_list(self):
         """ Get by Log number sorted list of datasets"""
+        dataset_list = []
         if not isinstance(self.datasets, dict):
             dataset_list = self.reader.read()
             self.datasets = {}
@@ -287,10 +479,11 @@ class ChooseDataset(object):
 
     def set_dataset(self, b):
         index = self.select_image.index
-
-
-
-
+        if index < len(self.dataset_names):
+            self.key = self.dataset_names[index]
+            self.dataset = self.datasets[self.key]
+            self.dataset.title = self.dataset.title.split('/')[-1]
+        self.dataset.title = self.dataset.title.split('/')[-1]
 
 
 def add_to_dict(file_dict, name):
@@ -512,7 +705,7 @@ def save_file_dialog_qt(file_types=None):  # , multiple_files=False):
     return filename
 
 
-def save_dataset(dataset, filename=None, h5_group=None):
+def save_dataset(dataset, filename=None, qt=False, h5_group=None):
     """ Saves a dataset to a file in pyNSID format
     Parameters
     ----------
@@ -523,7 +716,7 @@ def save_dataset(dataset, filename=None, h5_group=None):
     h5_group: hd5py.Group
         not used yet
     """
-    if filename is None:
+    if filename is None or qt==True:
         filename = save_file_dialog_qt()
     h5_filename = get_h5_filename(filename)
     h5_file = h5py.File(h5_filename, mode='a')
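The new qt keyword gives save_dataset two explicit call styles; a short sketch (file names are hypothetical):

    import pyTEMlib.file_tools as ft

    datasets = ft.open_file('measurement.dm3')        # hypothetical input file

    # explicit file name: saved directly, no dialog
    ft.save_dataset(datasets, filename='measurement')

    # qt=True (or filename=None) now forces the Qt save-file dialog,
    # even if a filename was passed in
    ft.save_dataset(datasets, qt=True)

Here datasets can be the dictionary returned by open_file, matching the save_dataset(dataset_dict, ...) call further down in this file.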
@@ -594,8 +787,57 @@ def h5_group_to_dict(group, group_dict={}):
     return group_dict
 
 
+def read_annotation(image):
+    if 'MAGE' not in image.data_type.name:
+        return {}
+    scale_x = np.abs(image.x[1]-image.x[0])
+    scale_y = np.abs(image.y[1]-image.y[0])
+    rec_scale = np.array([scale_x, scale_y,scale_x, scale_y])
+    if 'DocumentObjectList' not in image.original_metadata:
+        return {}
+    if '0' not in image.original_metadata['DocumentObjectList']:
+        return {}
+    annotations = {}
+    tags = image.original_metadata['DocumentObjectList']['0']
+    for key in tags:
+        if 'AnnotationGroupList' in key:
+            an_tags = tags[key]
+            for key2 in an_tags:
+                if isinstance(an_tags[key2], dict):
+                    if an_tags[key2]['AnnotationType'] == 13:  #type 'text'
+                        annotations[key2] = {'type': 'text'}
+                        if 'Label' in an_tags:
+                            annotations[key2]['label'] = an_tags['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+                        annotations[key2]['position'] = [rect[1],rect[0]]
+                        annotations[key2]['text'] = an_tags['Text']
+
+                    elif an_tags[key2]['AnnotationType']==6:
+                        annotations[key2] = {'type': 'circle'}
+                        if 'Label' in an_tags:
+                            annotations[key2]['label'] = an_tags['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+
+                        annotations[key2]['radius'] =rect[3]-rect[1]
+                        annotations[key2]['position'] = [rect[1],rect[0]]
+
+                    elif an_tags[key2]['AnnotationType'] == 23:
+                        annotations[key2] = {'type': 'spectral_image'}
+                        if 'Label' in an_tags[key2]:
+                            annotations[key2]['label'] = an_tags[key2]['Label']
+                        rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
+
+                        annotations[key2]['width'] =rect[3]-rect[1]
+                        annotations[key2]['height'] =rect[2]-rect[0]
+                        annotations[key2]['position'] = [rect[1],rect[0]]
+                        annotations[key2]['Rectangle'] = np.array(an_tags[key2]['Rectangle'])
+    if len(annotations)>0:
+        image.metadata['annotations'] = annotations
+    return annotations
+
+
 def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False):  # save_file=False,
-    """Opens a file if the extension is .hf5, .ndata, .dm3 or .dm4
+    """Opens a file if the extension is .emd, .mrc, .hf5, .ndata, .dm3 or .dm4
 
     If no filename is provided the QT open_file windows opens (if QT_available==True)
     Everything will be stored in a NSID style hf5 file.
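A sketch of how the new read_annotation helper fits in (file name hypothetical). Note that open_file below already calls read_annotation on every loaded dataset, so the same information also lands under image.metadata['annotations']:

    import pyTEMlib.file_tools as ft

    datasets = ft.open_file('annotated_image.dm3')     # hypothetical DigitalMicrograph file
    image = list(datasets.values())[0]                 # first loaded image dataset
    annotations = ft.read_annotation(image)            # {} if nothing is found

    for key, annotation in annotations.items():
        # annotation['type'] is 'text', 'circle' or 'spectral_image'
        print(key, annotation['type'], annotation.get('position'))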
@@ -640,24 +882,33 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
             print('no hdf5 dataset found in file')
             return {}
         else:
-
-
-
-
-            dataset_dict
-
-
-
-
-
-
-
-
+            if isinstance(datasets, dict):
+                dataset_dict = datasets
+
+            else:
+                dataset_dict = {}
+                for index, dataset in enumerate(datasets):
+                    title = str(dataset.title).split('/')[-1]
+                    # dataset.title = str(dataset.title).split('/')[-1]
+                    dataset_dict[title] = dataset
+                    if index == 0:
+                        file = datasets[0].h5_dataset.file
+                        master_group = datasets[0].h5_dataset.parent.parent.parent
+                for key in master_group.keys():
+                    if key not in dataset_dict:
+                        dataset_dict[key] = h5_group_to_dict(master_group[key])
+            if not write_hdf_file:
+                file.close()
+            for dset in dataset_dict.values():
+                if isinstance(dset, sidpy.Dataset):
+                    if 'Measurement' in dset.title:
+                        dset.title = dset.title.split('/')[-1]
         return dataset_dict
-    elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5']:
+    elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5', '.mrc']:
         # tags = open_file(filename)
         if extension in ['.dm3', '.dm4']:
             reader = SciFiReaders.DMReader(filename)
+
         elif extension in ['.emi']:
             try:
                 import hyperspy.api as hs
@@ -682,7 +933,7 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
                 return
         elif extension == '.emd':
             reader = SciFiReaders.EMDReader(filename, sum_frames=sum_frames)
-
+
         elif 'edax' in extension.lower():
             if 'h5' in extension:
                 reader = SciFiReaders.EDAXReader(filename)
@@ -690,6 +941,9 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
         elif extension in ['.ndata', '.h5']:
             reader = SciFiReaders.NionReader(filename)
 
+        elif extension in ['.mrc']:
+            reader = SciFiReaders.MRCReader(filename)
+
         else:
             raise NotImplementedError('extension not supported')
 
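With the branch just added, open_file hands .mrc files to SciFiReaders.MRCReader; a sketch (file name hypothetical):

    import pyTEMlib.file_tools as ft

    # .mrc now joins .dm3/.dm4, .emd, .emi, .ndata, .h5 and .edaxh5
    datasets = ft.open_file('tilt_series.mrc')
    for key, dataset in datasets.items():
        print(key, dataset.title, dataset.shape)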
@@ -703,10 +957,44 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
         if not isinstance(dset, dict):
             print('Please use new SciFiReaders Package for full functionality')
             if isinstance(dset, sidpy.Dataset):
-                dset =
-
+                dset = {'Channel_000': dset}
+        for key in dset:
+            read_annotation(dset[key])
+        if extension == '.emd':
+            for key1 in dset:
+                for key in dset[key1].original_metadata:
+                    if key == 'Instrument':
+                        model = dset[key1].original_metadata[key]['InstrumentModel']
+                        id = dset[key1].original_metadata[key]['InstrumentId']
+                        dset[key1].metadata['experiment']['instrument'] = model + str(id)
+                    if key == 'Optics':
+                        if 'LastMeasuredScreenCurrent' in dset[key1].original_metadata[key]:
+                            dset[key1].metadata['experiment']['current'] = float(dset[key1].original_metadata[key]['LastMeasuredScreenCurrent'])
+                    if key == 'Scan':
+                        if 'DwellTime' in dset[key1].original_metadata[key]:
+                            dset[key1].metadata['experiment']['pixel_time'] = float(dset[key1].original_metadata[key]['DwellTime'])
+                        if 'FrameTime' in dset[key1].original_metadata[key]:
+                            dset[key1].metadata['experiment']['exposure_time'] = float(dset[key1].original_metadata[key]['FrameTime'])
+                    if key == 'Sample':
+                        if 'SampleDescription' in dset[key1].original_metadata[key]:
+                            dset[key1].metadata['experiment']['sample'] = dset[key1].original_metadata[key]['SampleDescription']
+                        if 'SampleId' in dset[key1].original_metadata[key]:
+                            dset[key1].metadata['experiment']['sample_id'] = dset[key1].original_metadata[key]['SampleId']
+                    if key == 'Detectors':
+                        if 'detector' in dset[key1].metadata['experiment']:
+                            used_detector = dset[key1].metadata['experiment']['detector']
+                            for detector in dset[key1].original_metadata[key].values():
+                                if 'DetectorName' in detector:
+                                    if used_detector in detector['DetectorName']:
+                                        if 'CollectionAngleRange' in detector:
+                                            begin = detector['CollectionAngleRange']['begin']
+                                            end = detector['CollectionAngleRange']['end']
+                                            dset[key1].metadata['experiment']['collection_angle'] = float(begin)
+                                            dset[key1].metadata['experiment']['collection_angle_end'] = float(end)
         if isinstance(dset, dict):
             dataset_dict = dset
+            for dataset in dataset_dict.values():
+                dataset.metadata['filename'] = filename
 
         elif isinstance(dset, list):
             if len(dset) < 1:
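The .emd branch above copies selected Velox original_metadata entries into the sidpy experiment metadata. A sketch of where those values land after loading (each key is only present when the corresponding entry exists in the file; the file name is hypothetical):

    import pyTEMlib.file_tools as ft

    datasets = ft.open_file('stem_image.emd')
    dataset = next(iter(datasets.values()))
    experiment = dataset.metadata['experiment']

    # populated from Instrument / Optics / Scan / Sample / Detectors, when available
    for key in ['instrument', 'current', 'pixel_time', 'exposure_time',
                'sample', 'sample_id', 'collection_angle', 'collection_angle_end']:
        if key in experiment:
            print(key, experiment[key])

    # every dataset in the dictionary also records its source file now
    print(dataset.metadata['filename'])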
@@ -734,10 +1022,11 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
 
     # Temporary Fix for dual eels spectra in dm files
     # Todo: Fic in ScifyReaders
-    for dset in dataset_dict.values():
-        if '
-
-
+    for dset in dataset_dict.values():
+        if 'experiment' in dset.metadata:
+            if 'single_exposure_time' in dset.metadata['experiment']:
+                dset.metadata['experiment']['exposure_time'] = dset.metadata['experiment']['number_of_frames'] * \
+                    dset.metadata['experiment']['single_exposure_time']
     if write_hdf_file:
         h5_master_group = save_dataset(dataset_dict, filename=filename)
 
pyTEMlib/image_tools.py
CHANGED
@@ -15,7 +15,6 @@ import matplotlib.widgets as mwidgets
 import sidpy
 import pyTEMlib.file_tools as ft
 import pyTEMlib.sidpy_tools
-# import pyTEMlib.probe_tools
 
 from tqdm.auto import trange, tqdm
 
@@ -55,6 +54,12 @@ from skimage.filters import threshold_otsu, sobel
 from scipy.optimize import leastsq
 from sklearn.cluster import DBSCAN
 
+from ase.build import fcc110
+
+from scipy.ndimage import rotate
+from scipy.interpolate import RegularGridInterpolator
+from scipy.signal import fftconvolve
+
 
 _SimpleITK_present = True
 try:
@@ -68,6 +73,72 @@ if not _SimpleITK_present:
           'install with: conda install -c simpleitk simpleitk ')
 
 
+def get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0):
+    # Big assumption: the atoms are not near the edge of the unit cell
+    # If any atoms are close to the edge (ex. [0,0]) then the potential will be clipped
+    # before calling the function, shift the atoms to the center of the unit cell
+
+    pixel_size = fov / size
+    max_size = int(size * np.sqrt(2) + 1)  # Maximum size to accommodate rotation
+
+    # Create unit cell potential
+    positions = atoms.get_positions()[:, :2]
+    atomic_numbers = atoms.get_atomic_numbers()
+    unit_cell_size = atoms.cell.cellpar()[:2]
+
+    unit_cell_potential = np.zeros((max_size, max_size))
+    for pos, atomic_number in zip(positions, atomic_numbers):
+        x = pos[0] / pixel_size
+        y = pos[1] / pixel_size
+        atom_width = 0.5  # Angstrom
+        gauss_width = atom_width/pixel_size  # important for images at various fov. Room for improvement with theory
+        gauss = pyTEMlib.probe_tools.make_gauss(max_size, max_size, width = gauss_width, x0=x, y0=y)
+        unit_cell_potential += gauss * atomic_number  # gauss is already normalized to 1
+
+    # Create interpolation function for unit cell potential
+    x_grid = np.linspace(0, fov * max_size / size, max_size)
+    y_grid = np.linspace(0, fov * max_size / size, max_size)
+    interpolator = RegularGridInterpolator((x_grid, y_grid), unit_cell_potential, bounds_error=False, fill_value=0)
+
+    # Vectorized computation of the full potential map with max_size
+    x_coords, y_coords = np.meshgrid(np.linspace(0, fov, max_size), np.linspace(0, fov, max_size), indexing="ij")
+    xtal_x = x_coords % unit_cell_size[0]
+    xtal_y = y_coords % unit_cell_size[1]
+    potential_map = interpolator((xtal_x.ravel(), xtal_y.ravel())).reshape(max_size, max_size)
+
+    # Rotate and crop the potential map
+    potential_map = rotate(potential_map, rotation, reshape=False)
+    center = potential_map.shape[0] // 2
+    potential_map = potential_map[center - size // 2:center + size // 2, center - size // 2:center + size // 2]
+
+    potential_map = scipy.ndimage.gaussian_filter(potential_map,3)
+
+    return potential_map
+
+def convolve_probe(ab, potential):
+    # the pixel sizes should be the exact same as the potential
+    final_sizes = potential.shape
+
+    # Perform FFT-based convolution
+    pad_height = pad_width = potential.shape[0] // 2
+    potential = np.pad(potential, ((pad_height, pad_height), (pad_width, pad_width)), mode='constant')
+
+    probe, A_k, chi = pyTEMlib.probe_tools.get_probe(ab, potential.shape[0], potential.shape[1], scale = 'mrad', verbose= False)
+
+
+    convolved = fftconvolve(potential, probe, mode='same')
+
+    # Crop to original potential size
+    start_row = pad_height
+    start_col = pad_width
+    end_row = start_row + final_sizes[0]
+    end_col = start_col + final_sizes[1]
+
+    image = convolved[start_row:end_row, start_col:end_col]
+
+    return probe, image
+
+
 # Wavelength in 1/nm
 def get_wavelength(e0):
     """
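A hedged sketch of how the two new helpers could be combined to simulate a STEM image from an ASE structure. The length unit is assumed to be Angstrom (matching atom_width above), and the aberration dictionary ab is a placeholder whose keys are defined by pyTEMlib.probe_tools.get_probe and are not spelled out here:

    import pyTEMlib.image_tools as it
    from ase.build import fcc110              # same builder imported above

    atoms = fcc110('Al', size=(2, 2, 1), vacuum=2.0)
    atoms.translate([1.0, 1.0, 0.0])          # keep atoms off the cell edge, as the comment above asks

    fov = 20                                  # field of view in Angstrom (assumed)
    potential = it.get_atomic_pseudo_potential(fov, atoms, size=512, rotation=0)

    # convolve_probe then needs an aberration dictionary 'ab' understood by
    # pyTEMlib.probe_tools.get_probe (keys omitted here):
    # probe, image = it.convolve_probe(ab, potential)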
@@ -280,20 +351,21 @@ def diffractogram_spots(dset, spot_threshold, return_center=True, eps=0.1):
     return spots, center
 
 
-def center_diffractogram(dset, return_plot = True,
+def center_diffractogram(dset, return_plot = True, smoothing = 1, min_samples = 10, beamstop_size = 0.1):
     try:
         diff = np.array(dset).T.astype(np.float16)
         diff[diff < 0] = 0
-
-        if histogram_factor is not None:
-            hist, bins = np.histogram(np.ravel(diff), bins=256, range=(0, 1), density=True)
-            threshold = threshold_otsu(diff, hist = hist * histogram_factor)
-        else:
-            threshold = threshold_otsu(diff)
+        threshold = threshold_otsu(diff)
         binary = (diff > threshold).astype(float)
         smoothed_image = ndimage.gaussian_filter(binary, sigma=smoothing)  # Smooth before edge detection
         smooth_threshold = threshold_otsu(smoothed_image)
         smooth_binary = (smoothed_image > smooth_threshold).astype(float)
+
+        # add a circle to mask the beamstop
+        x, y = np.meshgrid(np.arange(dset.shape[0]), np.arange(dset.shape[1]))
+        circle = (x - dset.shape[0] / 2) ** 2 + (y - dset.shape[1] / 2) ** 2 < (beamstop_size * dset.shape[0] / 2) ** 2
+        smooth_binary[circle] = 1
+
         # Find the edges using the Sobel operator
         edges = sobel(smooth_binary)
         edge_points = np.argwhere(edges)
@@ -322,18 +394,21 @@ def center_diffractogram(dset, return_plot = True, histogram_factor = None, smoo
 
     finally:
         if return_plot:
-            fig, ax = plt.subplots(1,
+            fig, ax = plt.subplots(1, 5, figsize=(14, 4), sharex=True, sharey=True)
             ax[0].set_title('Diffractogram')
             ax[0].imshow(dset.T, cmap='viridis')
             ax[1].set_title('Otsu Binary Image')
             ax[1].imshow(binary, cmap='gray')
             ax[2].set_title('Smoothed Binary Image')
-            ax[2].imshow(
-
-            ax[3].
-            ax[3].
+            ax[2].imshow(smoothed_image, cmap='gray')
+
+            ax[3].set_title('Smoothed Binary Image')
+            ax[3].imshow(smooth_binary, cmap='gray')
+            ax[4].set_title('Edge Detection and Fitting')
+            ax[4].imshow(edges, cmap='gray')
+            ax[4].scatter(center[0], center[1], c='r', s=10)
             circle = plt.Circle(center, mean_radius, color='red', fill=False)
-            ax[
+            ax[4].add_artist(circle)
             for axis in ax:
                 axis.axis('off')
             fig.tight_layout()
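center_diffractogram now drops the histogram_factor branch in favour of a plain Otsu threshold plus the new beamstop mask; a sketch of a call with the new keywords (diffractogram stands for a 2D diffraction dataset; the return value is not visible in this hunk, the fitted centre is drawn in the last diagnostic panel):

    import pyTEMlib.image_tools as it

    result = it.center_diffractogram(diffractogram,
                                     return_plot=True,    # shows the five-panel diagnostic figure
                                     smoothing=1,
                                     beamstop_size=0.1)   # mask radius as a fraction of the image width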
@@ -522,6 +597,7 @@ def demon_registration(dataset, verbose=False):
     demon_registered.source = dataset.title
 
     demon_registered.metadata = {'analysis': 'non-rigid demon registration'}
+    demon_registered.metadata['experiment'] = dataset.metadata['experiment'].copy()
     if 'input_crop' in dataset.metadata:
         demon_registered.metadata['input_crop'] = dataset.metadata['input_crop']
     if 'input_shape' in dataset.metadata:
@@ -610,6 +686,7 @@ def rigid_registration(dataset, sub_pixel=True):
     rigid_registered.source = dataset.title
     rigid_registered.metadata = {'analysis': 'rigid sub-pixel registration', 'drift': drift,
                                  'input_crop': input_crop, 'input_shape': dataset.shape[1:]}
+    rigid_registered.metadata['experiment'] = dataset.metadata['experiment'].copy()
     rigid_registered.set_dimension(0, sidpy.Dimension(np.arange(rigid_registered.shape[0]),
                                                       name='frame', units='frame', quantity='time',
                                                       dimension_type='temporal'))
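Both registration routines now carry the experiment metadata of their input forward. A short sketch of what that means downstream (image_stack is a hypothetical sidpy image-stack dataset that already has metadata['experiment']):

    import pyTEMlib.image_tools as it

    registered = it.rigid_registration(image_stack, sub_pixel=True)
    print(registered.metadata['experiment'])      # acquisition parameters survive the registration

    non_rigid = it.demon_registration(registered, verbose=False)
    print(non_rigid.metadata['experiment'])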
@@ -666,7 +743,6 @@ def rig_reg_drift(dset, rel_drift):
     rig_reg = np.zeros([dset.shape[frame_dim[0]], dset.shape[spatial_dim[0]], dset.shape[spatial_dim[1]]])
 
     # absolute drift
-    print(rel_drift)
     drift = np.array(rel_drift).copy()
 
     drift[0] = [0, 0]