pyTEMlib 0.2025.4.2__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- build/lib/pyTEMlib/__init__.py +33 -0
- build/lib/pyTEMlib/animation.py +640 -0
- build/lib/pyTEMlib/atom_tools.py +238 -0
- build/lib/pyTEMlib/config_dir.py +31 -0
- build/lib/pyTEMlib/crystal_tools.py +1219 -0
- build/lib/pyTEMlib/diffraction_plot.py +756 -0
- build/lib/pyTEMlib/dynamic_scattering.py +293 -0
- build/lib/pyTEMlib/eds_tools.py +826 -0
- build/lib/pyTEMlib/eds_xsections.py +432 -0
- build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
- build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
- build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- build/lib/pyTEMlib/file_reader.py +274 -0
- build/lib/pyTEMlib/file_tools.py +811 -0
- build/lib/pyTEMlib/get_bote_salvat.py +69 -0
- build/lib/pyTEMlib/graph_tools.py +1153 -0
- build/lib/pyTEMlib/graph_viz.py +599 -0
- build/lib/pyTEMlib/image/__init__.py +37 -0
- build/lib/pyTEMlib/image/image_atoms.py +270 -0
- build/lib/pyTEMlib/image/image_clean.py +197 -0
- build/lib/pyTEMlib/image/image_distortion.py +299 -0
- build/lib/pyTEMlib/image/image_fft.py +277 -0
- build/lib/pyTEMlib/image/image_graph.py +926 -0
- build/lib/pyTEMlib/image/image_registration.py +316 -0
- build/lib/pyTEMlib/image/image_utilities.py +309 -0
- build/lib/pyTEMlib/image/image_window.py +421 -0
- build/lib/pyTEMlib/image_tools.py +699 -0
- build/lib/pyTEMlib/interactive_image.py +1 -0
- build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
- build/lib/pyTEMlib/microscope.py +61 -0
- build/lib/pyTEMlib/probe_tools.py +906 -0
- build/lib/pyTEMlib/sidpy_tools.py +153 -0
- build/lib/pyTEMlib/simulation_tools.py +104 -0
- build/lib/pyTEMlib/test.py +437 -0
- build/lib/pyTEMlib/utilities.py +314 -0
- build/lib/pyTEMlib/version.py +5 -0
- build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
- pyTEMlib/__init__.py +25 -3
- pyTEMlib/animation.py +31 -22
- pyTEMlib/atom_tools.py +29 -34
- pyTEMlib/config_dir.py +2 -28
- pyTEMlib/crystal_tools.py +129 -165
- pyTEMlib/eds_tools.py +559 -342
- pyTEMlib/eds_xsections.py +432 -0
- pyTEMlib/eels_tools/__init__.py +44 -0
- pyTEMlib/eels_tools/core_loss_tools.py +751 -0
- pyTEMlib/eels_tools/eels_database.py +134 -0
- pyTEMlib/eels_tools/low_loss_tools.py +655 -0
- pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
- pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
- pyTEMlib/file_reader.py +274 -0
- pyTEMlib/file_tools.py +260 -1130
- pyTEMlib/get_bote_salvat.py +69 -0
- pyTEMlib/graph_tools.py +101 -174
- pyTEMlib/graph_viz.py +150 -0
- pyTEMlib/image/__init__.py +37 -0
- pyTEMlib/image/image_atoms.py +270 -0
- pyTEMlib/image/image_clean.py +197 -0
- pyTEMlib/image/image_distortion.py +299 -0
- pyTEMlib/image/image_fft.py +277 -0
- pyTEMlib/image/image_graph.py +926 -0
- pyTEMlib/image/image_registration.py +316 -0
- pyTEMlib/image/image_utilities.py +309 -0
- pyTEMlib/image/image_window.py +421 -0
- pyTEMlib/image_tools.py +154 -928
- pyTEMlib/kinematic_scattering.py +1 -1
- pyTEMlib/probe_tools.py +1 -1
- pyTEMlib/test.py +437 -0
- pyTEMlib/utilities.py +314 -0
- pyTEMlib/version.py +2 -3
- pyTEMlib/xrpa_x_sections.py +14 -10
- {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
- pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
- {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
- pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
- pyTEMlib/core_loss_widget.py +0 -721
- pyTEMlib/eels_dialog.py +0 -754
- pyTEMlib/eels_dialog_utilities.py +0 -1199
- pyTEMlib/eels_tools.py +0 -2359
- pyTEMlib/file_tools_qt.py +0 -193
- pyTEMlib/image_dialog.py +0 -158
- pyTEMlib/image_dlg.py +0 -146
- pyTEMlib/info_widget.py +0 -1086
- pyTEMlib/info_widget3.py +0 -1120
- pyTEMlib/low_loss_widget.py +0 -479
- pyTEMlib/peak_dialog.py +0 -1129
- pyTEMlib/peak_dlg.py +0 -286
- pytemlib-0.2025.4.2.dist-info/RECORD +0 -38
- pytemlib-0.2025.4.2.dist-info/top_level.txt +0 -1
- {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
- {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,811 @@
"""file_tools: All tools to load and save data

##################################

2018 01 31 Included Nion Swift files to be opened
major revision 2020 09 to include sidpy and pyNSID data formats
2022 change to ase format for structures: this changed the default unit of length to Angstrom!!!

##################################
"""
import typing

import os
import pickle
import warnings
import numpy as np
import h5py

# For structure files of various flavors, for instance POSCAR and other theory packages
import ase.io

# =============================================
#   Include pycroscopy libraries              #
# =============================================
import SciFiReaders
import pyNSID
import sidpy
import ipywidgets
import IPython

# =============================================
#   Include pyTEMlib libraries                #
# =============================================
from . import crystal_tools
from .config_dir import config_path
from .file_reader import adorned_to_sidpy, read_old_h5group
from .version import __version__

Dimension = sidpy.Dimension

__version__ = '2025.8.07'

ChooseDataset = sidpy.ChooseDataset

class FileWidget(sidpy.FileWidget):
    """Widget to select directories or files from a list

    Works in google colab.
    The widget converts the name of a Nion file to the title used in Nion's Swift software,
    because the file name alone is otherwise incomprehensible.

    Attributes
    ----------
    dir_name: str
        name of starting directory
    extension: list of str
        extensions of files to be listed in widget

    Methods
    -------
    get_directory
    set_options
    get_file_name

    Example
    -------
    >>> from google.colab import drive
    >>> drive.mount("/content/drive")
    >>> file_list = pyTEMlib.file_tools.FileWidget()
    next code cell:
    >>> datasets = file_list.datasets
    >>> dataset = file_list.selected_dataset
    """

    def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
        if dir_name is None:
            dir_name = get_last_path()
            self.save_path = True
        super().__init__(dir_name=dir_name, extension=extension, sum_frames=sum_frames)
        select_button = ipywidgets.Button(description='Select Main',
                                          layout=ipywidgets.Layout(width='auto', grid_area='header'),
                                          style=ipywidgets.ButtonStyle(button_color='lightblue'))

        add_button = ipywidgets.Button(description='Add',
                                       layout=ipywidgets.Layout(width='auto', grid_area='header'),
                                       style=ipywidgets.ButtonStyle(button_color='lightblue'))
        self.dataset_list = ['None']
        self.selected_dataset = None
        self.datasets = {}
        self.selected_key = ''
        self.loaded_datasets = ipywidgets.Dropdown(options=self.dataset_list,
                                                   value=self.dataset_list[0],
                                                   description='loaded datasets:',
                                                   disabled=False)

        ui = ipywidgets.HBox([select_button, add_button, self.loaded_datasets])
        IPython.display.display(ui)
        select_button.on_click(self.select_main)
        add_button.on_click(self.add_dataset)
        self.loaded_datasets.observe(self.select_dataset)

    def select_dataset(self, value: int = 0):
        """Select a dataset from the dropdown."""
        key = self.loaded_datasets.value.split(':')[0]
        if key != 'None':
            self.selected_dataset = self.datasets[key]
            self.selected_key = key

    def select_main(self, value: int = 0):
        """Select the main dataset."""
        self.datasets = open_file(self.file_name, sum_frames=self.sum_frames)
        self.dataset_list = []
        for key in self.datasets.keys():
            self.dataset_list.append(f'{key}: {self.datasets[key].title}')
        self.loaded_datasets.options = self.dataset_list
        self.loaded_datasets.value = self.dataset_list[0]
        self.dataset = self.datasets[list(self.datasets.keys())[0]]
        self.selected_dataset = self.dataset

    def add_dataset(self, value: int = 0):
        """Add another dataset to the list of loaded datasets."""
        key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')
        self.dataset_list.append(f'{key}: {self.datasets[key].title}')
        self.loaded_datasets.options = self.dataset_list
        self.loaded_datasets.value = self.dataset_list[-1]

def add_to_dict(file_dict: dict, name: str):
    """Add a file to the dictionary with its metadata."""
    full_name = os.path.join(file_dict['directory'], name)
    basename, extension = os.path.splitext(name)
    size = os.path.getsize(full_name) * 2 ** -20
    display_name = name
    if len(extension) == 0:
        display_file_list = f' {name} - {size:.1f} MB'
    elif extension in ['.hf5']:
        display_file_list = f" {name} - {size:.1f} MB"
    elif extension in ['.h5', '.ndata']:
        try:
            reader = SciFiReaders.NionReader(full_name)
            dataset_nion = reader.read()
            key = list(dataset_nion.keys())[0]
            display_name = dataset_nion[key].title
            display_file_list = f" {display_name}{extension} - {size:.1f} MB"
        except Exception:
            display_file_list = f" {name} - {size:.1f} MB"
    else:
        display_file_list = f' {name} - {size:.1f} MB'
    file_dict[name] = {'display_string': display_file_list, 'basename': basename,
                       'extension': extension, 'size': size, 'display_name': display_name}

def update_directory_list(directory_name: str) -> dict:
    """Update the directory list and return the file dictionary."""
    dir_list = os.listdir(directory_name)

    if '.pyTEMlib.files.pkl' in dir_list:
        with open(os.path.join(directory_name, '.pyTEMlib.files.pkl'), 'rb') as f:
            file_dict = pickle.load(f)
        if directory_name != file_dict['directory']:
            print('directory moved since last time read')
            file_dict['directory'] = directory_name
        dir_list.remove('.pyTEMlib.files.pkl')
    else:
        file_dict = {'directory': directory_name}

    # add new files
    file_dict['file_list'] = []
    file_dict['display_file_list'] = []
    file_dict['directory_list'] = []

    for name in dir_list:
        if os.path.isfile(os.path.join(file_dict['directory'], name)):
            if name not in file_dict:
                add_to_dict(file_dict, name)
            file_dict['file_list'].append(name)
            file_dict['display_file_list'].append(file_dict[name]['display_string'])
        else:
            file_dict['directory_list'].append(name)
    remove_item = []

    # delete items of deleted files
    save_pickle = False

    for name in file_dict.keys():
        if name not in dir_list and name not in ['directory', 'file_list',
                                                 'directory_list', 'display_file_list']:
            remove_item.append(name)
        else:
            if 'extension' in file_dict[name]:
                save_pickle = True
    for item in remove_item:
        file_dict.pop(item)

    if save_pickle:
        with open(os.path.join(file_dict['directory'], '.pyTEMlib.files.pkl'), 'wb') as f:
            pickle.dump(file_dict, f)
    return file_dict

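# Usage sketch for the directory cache above (editor's illustration): the pickled
# '.pyTEMlib.files.pkl' avoids re-reading Nion file titles on every listing.
file_dict = update_directory_list('.')   # the current directory stands in for a data folder
for display_string in file_dict['display_file_list']:
    print(display_string)                # e.g. '  scan.dm3 - 12.3 MB'
print(file_dict['directory_list'])       # sub-directories are collected separately
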
####
# General Open and Save Methods
####

def get_last_path() -> str:
    """Returns the path of the file last opened"""
    try:
        with open(os.path.join(config_path, 'path.txt'), 'r', encoding='utf-8') as file:
            path = file.read()
    except IOError:
        path = ''

    if len(path) < 2:
        path = '.'
    else:
        if not os.path.exists(path):
            path = '.'
    return path

def save_path(filename: str) -> str:
    """Save the path of the last opened file"""

    if len(filename) > 1:
        with open(os.path.join(config_path, 'path.txt'), 'w', encoding='utf-8') as file:
            path, _ = os.path.split(filename)
            file.write(path)
    else:
        path = '.'
    return path

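# Sketch of the path memory above (editor's illustration; path hypothetical):
save_path('/data/session_01/scan.dm3')   # remembers the directory '/data/session_01'
print(get_last_path())                   # '/data/session_01', or '.' if it no longer exists
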
def save_dataset(dataset, filename, h5_group=None):
    """Saves a dataset to a file in pyNSID format

    Parameters
    ----------
    dataset: sidpy.Dataset or dict
        the data
    filename: str
        name of file to be opened
    h5_group: h5py.Group
        not used yet
    """
    h5_filename = get_h5_filename(filename)
    h5_file = h5py.File(h5_filename, mode='a')
    if isinstance(dataset, dict):
        h5_group = save_dataset_dictionary(h5_file, dataset)
        return h5_group
    if isinstance(dataset, sidpy.Dataset):
        h5_dataset = save_single_dataset(h5_file, dataset, h5_group=h5_group)
        return h5_dataset.parent

    raise TypeError('Only sidpy.Datasets or dictionaries can be saved with pyTEMlib')

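# Usage sketch for save_dataset (editor's illustration with synthetic data; file name hypothetical):
dataset = sidpy.Dataset.from_array(np.random.random((64, 64)), title='random_image')
dataset.data_type = 'image'
h5_group = save_dataset(dataset, filename='random.hf5')
# A dictionary of datasets goes into one Measurement group, one sub-group per key:
# save_dataset({'Channel_000': dataset}, filename='random.hf5')
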
def save_single_dataset(h5_file, dataset, h5_group=None):
    """
    Saves a single sidpy.Dataset to an HDF5 file.
    """
    if h5_group is None:
        h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
        h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_measurement_group, 'Channel_')

    elif isinstance(h5_group, str):
        if h5_group not in h5_file:
            h5_group = h5_file.create_group(h5_group)
        else:
            if h5_group[-1] == '/':
                h5_group = h5_group[:-1]

            channel = h5_group.split('/')[-1]
            h5_measurement_group = h5_group[:-len(channel)]
            h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file[h5_measurement_group], 'Channel_')
    else:
        raise ValueError('h5_group needs to be a string or None')

    h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)
    dataset.h5_dataset = h5_dataset
    h5_dataset.file.flush()
    return h5_dataset

def save_dataset_dictionary(h5_file: h5py.File, datasets: dict) -> h5py.Group:
    """Saves a dictionary of datasets to an HDF5 file."""
    h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
    for key, dataset in datasets.items():
        if key[-1] == '/':
            key = key[:-1]
        if isinstance(dataset, sidpy.Dataset):
            h5_group = h5_measurement_group.create_group(key)
            h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)
            dataset.h5_dataset = h5_dataset
            h5_dataset.file.flush()
        elif isinstance(dataset, dict):
            sidpy.hdf.hdf_utils.write_dict_to_h5_group(h5_measurement_group, dataset, key)
        else:
            print('could not save item ', key, 'of dataset dictionary')
    return h5_measurement_group

def h5_group_to_dict(group, group_dict=None):
    """
    Converts an h5py group to a python dictionary.
    """
    if group_dict is None:
        group_dict = {}
    if not isinstance(group, h5py.Group):
        raise TypeError('we need a h5py group to read from')
    if not isinstance(group_dict, dict):
        raise TypeError('group_dict needs to be a python dictionary')

    group_dict[group.name.split('/')[-1]] = dict(group.attrs)
    for key in group.keys():
        h5_group_to_dict(group[key], group_dict[group.name.split('/')[-1]])
    return group_dict

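# Sketch of the recursion above (editor's illustration; file name hypothetical):
# nested groups become nested dicts keyed by group name, attributes become entries.
with h5py.File('example.hf5', mode='w') as h5_file:
    group = h5_file.create_group('Measurement_000/metadata')
    group.attrs['acceleration_voltage'] = 200000.0
    nested = h5_group_to_dict(h5_file['Measurement_000'])
    # nested == {'Measurement_000': {'metadata': {'acceleration_voltage': 200000.0}}}
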
def read_dm_annotation(image: sidpy.Dataset) -> typing.Dict[str, typing.Any]:
    """
    Reads annotations from a sidpy.Dataset that originated from a dm3 file.
    """
    if 'MAGE' not in image.data_type.name:
        return {}
    scale_x = np.abs(image.x[1] - image.x[0])
    scale_y = np.abs(image.y[1] - image.y[0])
    rec_scale = np.array([scale_x, scale_y, scale_x, scale_y])
    annotations = {}
    tags = image.original_metadata.get('DocumentObjectList', {}).get('0', {}).get('AnnotationGroupList', {})

    if not tags:
        return annotations

    for key in tags:
        if isinstance(tags[key], dict):
            if tags[key]['AnnotationType'] == 13:  # type 'text'
                annotations[key] = {'type': 'text'}
                annotations[key]['label'] = tags[key].get('Label', '')
                rect = np.array(tags[key]['Rectangle']) * rec_scale
                annotations[key]['position'] = [rect[1], rect[0]]
                annotations[key]['text'] = tags[key].get('Text', key)
            elif tags[key]['AnnotationType'] == 6:  # type 'circle'
                annotations[key] = {'type': 'circle'}
                annotations[key]['label'] = tags[key].get('Label', '')
                rect = np.array(tags[key]['Rectangle']) * rec_scale
                annotations[key]['radius'] = rect[3] - rect[1]
                annotations[key]['position'] = [rect[1], rect[0]]
            elif tags[key]['AnnotationType'] == 23:  # type 'spectral image'
                annotations[key] = {'type': 'spectral_image'}
                annotations[key]['label'] = tags[key].get('Label', '')
                rect = np.array(tags[key].get('Rectangle', [0, 0, 0, 0])) * rec_scale
                annotations[key]['width'] = rect[3] - rect[1]
                annotations[key]['height'] = rect[2] - rect[0]
                annotations[key]['position'] = [rect[1], rect[0]]
                annotations[key]['Rectangle'] = np.array(tags[key].get('Rectangle', [0, 0, 0, 0]))
    if annotations:
        image.metadata['annotations'] = annotations
    return annotations

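# Usage sketch for read_dm_annotation (editor's illustration; 'image' stands for a
# sidpy.Dataset loaded from a dm3/dm4 file, e.g. open_file('scan.dm3')['Channel_000']):
annotations = read_dm_annotation(image)
for key, annotation in annotations.items():
    print(key, annotation['type'], annotation['position'])
# The AnnotationType codes above map as: 13 -> 'text', 6 -> 'circle', 23 -> 'spectral_image'.
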
def open_file(filename, write_hdf_file=False, sum_frames=False, sum_eds=True):
    """Opens a file if the extension is .emd, .mrc, .hf5, .ndata, .dm3 or .dm4

    Everything will be stored in an NSID-style hf5 file.
    Subroutines used:
        - NSIDReader
        - nsid.write_
        - get_main_tags
        - get_additional_tags

    Parameters
    ----------
    filename: str
        name of file to be opened
    write_hdf_file: bool
        if True, the sidpy datasets are written to an hf5 file automatically
    sum_frames: bool
        if True, multi-frame emd data are summed on reading
    sum_eds: bool
        if True, the individual EDS detector signals of an emd file are summed into one spectrum

    Returns
    -------
    dict
        dictionary of sidpy datasets, each with the location of its hdf5 dataset as attribute
    """
    if not isinstance(filename, str):
        raise TypeError('filename must be a non-empty string')
    if filename == '':
        raise TypeError('filename must be a non-empty string')

    _, file_name = os.path.split(filename)
    basename, extension = os.path.splitext(file_name)
    provenance = ''
    if extension == '.hf5':
        reader = SciFiReaders.NSIDReader(filename)
        datasets = reader.read()
        if len(datasets) < 1:
            print('no hdf5 dataset found in file')
            return {}
        file = None
        if isinstance(datasets, dict):
            dataset_dict = datasets
        else:
            dataset_dict = {}
            for index, dataset in enumerate(datasets):
                title = str(dataset.title).rsplit('/', maxsplit=1)[-1]
                dataset_dict[title] = dataset
                if index == 0:
                    file = datasets[0].h5_dataset.file
            master_group = datasets[0].h5_dataset.parent.parent.parent
            for key in master_group.keys():
                if key not in dataset_dict:
                    dataset_dict[key] = h5_group_to_dict(master_group[key])
        if not write_hdf_file and file is not None:
            file.close()
        for dset in dataset_dict.values():
            if isinstance(dset, sidpy.Dataset):
                if 'Measurement' in dset.title:
                    dset.title = dset.title.split('/')[-1]
        return dataset_dict

    if extension in ['.dm3', '.dm4']:
        reader = SciFiReaders.DMReader(filename)
    elif extension in ['.emi']:
        try:
            import hyperspy.api as hs
            s = hs.load(filename)
            dataset_dict = {}
            spectrum_number = 0
            if not isinstance(s, list):
                s = [s]
            for index, datum in enumerate(s):
                dset = SciFiReaders.convert_hyperspy(datum)
                if datum.data.ndim == 1:
                    dset.title = dset.title + f'_{spectrum_number}_Spectrum'
                    spectrum_number += 1
                elif datum.data.ndim == 3:
                    dset.title = dset.title + '_SI'
                    dset = dset.T
                dset.title = dset.title[11:]
                dset.add_provenance('pyTEMlib', 'open_file', version=__version__,
                                    linked_data='emi_converted_by_hyperspy')
                dataset_dict[f'Channel_{index:03d}'] = dset
            return dataset_dict
        except ImportError:
            print('This file type needs hyperspy to be installed to be able to be read')
            return
    elif extension == '.emd':
        reader = SciFiReaders.EMDReader(filename, sum_frames=sum_frames)
        provenance = 'SciFiReader.EMDReader'
    elif 'edax' in extension.lower():
        if 'h5' in extension:
            reader = SciFiReaders.EDAXReader(filename)
            provenance = 'SciFiReader.EDAXReader'
        else:
            raise NotImplementedError('extension not supported')
    elif extension in ['.ndata', '.h5']:
        reader = SciFiReaders.NionReader(filename)
        provenance = 'SciFiReader.NionReader'
    elif extension in ['.rto']:
        reader = SciFiReaders.BrukerReader(filename)
        provenance = 'SciFiReader.BrukerReader'
    elif extension in ['.mrc']:
        reader = SciFiReaders.MRCReader(filename)
        provenance = 'SciFiReader.MRCReader'
    else:
        raise NotImplementedError('extension not supported')

    _, file_name = os.path.split(filename)
    basename, _ = os.path.splitext(file_name)

    # ### Here we read the data into sidpy datasets
    dset = reader.read()

    if extension in ['.dm3', '.dm4']:
        title = (basename.strip().replace('-', '_')).split('/')[-1]
        if not isinstance(dset, dict):
            print('Please use new SciFiReaders Package for full functionality')
        if isinstance(dset, sidpy.Dataset):
            dset = {'Channel_000': dset}
        for key in dset:
            read_dm_annotation(dset[key])
    elif extension == '.emd' and sum_eds:
        eds_keys = []
        spectrum = None
        for key, item in dset.items():
            if 'SuperX' in item.title or 'UltraX' in item.title:
                if len(eds_keys) == 0:
                    spectrum = item.copy()
                else:
                    spectrum += item
                eds_keys.append(key)
        if eds_keys:
            spectrum.compute()
            spectrum.data_type = dset[eds_keys[0]].data_type
            if 'SuperX' in dset[eds_keys[0]].title:
                spectrum.title = 'EDS_SuperX'
            if 'UltraX' in dset[eds_keys[0]].title:
                spectrum.title = 'EDS_UltraX'
            spectrum.original_metadata = dset[eds_keys[0]].original_metadata.copy()
            spectrum.metadata = dset[eds_keys[0]].metadata.copy()

            for key in eds_keys:
                del dset[key]
            dset['SuperX'] = spectrum

    if isinstance(dset, dict):
        dataset_dict = dset
        for dataset in dataset_dict.values():
            dataset.add_provenance('pyTEMlib', 'open_file',
                                   version=__version__,
                                   linked_data=provenance)
            dataset.metadata['filename'] = filename
    elif isinstance(dset, list):
        warnings.warn('Update SciFiReaders, we do not support lists of datasets anymore',
                      DeprecationWarning)
        dataset_dict = {f'Channel_{index:03d}': item for index, item in enumerate(dset)}
    else:
        dset.filename = basename.strip().replace('-', '_')
        read_essential_metadata(dset)
        dset.metadata['filename'] = filename
        dataset_dict = {'Channel_000': dset}

    # Temporary fix for dual EELS spectra in dm files
    # Todo: fix in SciFiReaders
    for dset in dataset_dict.values():
        if 'experiment' in dset.metadata:
            exp_meta = dset.metadata['experiment']
            if 'single_exposure_time' in exp_meta:
                exp_meta['exposure_time'] = exp_meta['number_of_frames'] * \
                                            exp_meta['single_exposure_time']
    if write_hdf_file:
        save_dataset(dataset_dict, filename=filename)

    save_path(filename)
    return dataset_dict

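# Usage sketch for open_file (editor's illustration; 'scan.dm3' is a hypothetical file):
datasets = open_file('scan.dm3')
main_dataset = datasets['Channel_000']     # dm files are returned as {'Channel_000': ...}
print(main_dataset.title, main_dataset.data_type)
print(main_dataset.metadata['filename'])
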
################################################################
# Read Functions
#################################################################

def read_essential_metadata(dataset):
    """Updates dataset.metadata['experiment'] with essential information read from original metadata

    This depends on whether it is originally a Nion or a dm3 file.
    """
    if not isinstance(dataset, sidpy.Dataset):
        raise TypeError("we need a sidpy.Dataset")
    experiment_dictionary = {}
    if dataset.original_metadata.get('metadata', {}).get('hardware_source'):
        experiment_dictionary = read_nion_image_info(dataset.original_metadata)
    if 'experiment' not in dataset.metadata:
        dataset.metadata['experiment'] = {}
    dataset.metadata['experiment'].update(experiment_dictionary)

def read_nion_image_info(original_metadata):
    """Read essential parameters from original_metadata originating from a Nion file"""
    if not isinstance(original_metadata, dict):
        raise TypeError('We need a dictionary to read')
    metadata = original_metadata.get('metadata', {}).get('hardware_source', {})

    return metadata.get('ImageScanned', {})

def get_h5_filename(fname):
    """Determines the file name of the hdf5 file for a newly converted data file"""

    path, filename = os.path.split(fname)
    basename, _ = os.path.splitext(filename)
    h5_file_name_original = os.path.join(path, basename + '.hf5')
    h5_file_name = h5_file_name_original

    if os.path.exists(os.path.abspath(h5_file_name_original)):
        count = 1
        h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'
        while os.path.exists(os.path.abspath(h5_file_name)):
            count += 1
            h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'

    if h5_file_name != h5_file_name_original:
        path, filename = os.path.split(h5_file_name)
        print('Cannot overwrite file. Using: ', filename)
    return str(h5_file_name)

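# Sketch of the collision handling above (editor's illustration; names hypothetical):
# if 'scan.hf5' already exists, '-1', '-2', ... are appended until a free name is found.
h5_name = get_h5_filename('scan.dm3')   # -> 'scan.hf5', or 'scan-1.hf5' if that exists
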
def get_start_channel(h5_file):
    """Legacy wrapper for get_main_channel"""

    warnings.warn('Deprecated: use function get_main_channel instead', DeprecationWarning)
    return get_main_channel(h5_file)


def get_main_channel(h5_file):
    """Returns the first channel group in an hdf5-file"""

    current_channel = None
    if 'Measurement_000' in h5_file:
        if 'Measurement_000/Channel_000' in h5_file:
            current_channel = h5_file['Measurement_000/Channel_000']
    return current_channel

def h5_tree(input_object):
    """Wrapper for the sidpy function print_tree,

    so that sidpy does not have to be imported in the notebook
    """

    if isinstance(input_object, sidpy.Dataset):
        if not isinstance(input_object.h5_dataset, h5py.Dataset):
            raise ValueError('sidpy dataset does not have an associated h5py dataset')
        h5_file = input_object.h5_dataset.file
    elif isinstance(input_object, h5py.Dataset):
        h5_file = input_object.file
    elif isinstance(input_object, (h5py.Group, h5py.File)):
        h5_file = input_object
    else:
        raise TypeError('should be an h5py object or a sidpy Dataset')
    sidpy.hdf_utils.print_tree(h5_file)

def add_dataset_from_file(datasets, filename=None, key_name='Log', single_dataset=True):
    """Add dataset to datasets dictionary

    Parameters
    ----------
    datasets: dict
        dictionary to add the loaded dataset(s) to
    filename: str, default: None
        name of file to open; if None, a dialog will appear
    key_name: str, default: 'Log'
        name for the dictionary key, with a running number appended
    single_dataset: bool, default: True
        if True, only the first dataset of the file is added

    Returns
    -------
    key_name: str
        actual last used name of dictionary key
    """
    datasets2 = open_file(filename=filename)
    first_dataset = datasets2[list(datasets2)[0]]
    if isinstance(first_dataset, sidpy.Dataset):
        index = 0
        for key in datasets.keys():
            if key_name in key:
                if int(key[-3:]) >= index:
                    index = int(key[-3:]) + 1
        if single_dataset:
            datasets[key_name + f'_{index:03}'] = first_dataset
        else:
            for key, dataset in datasets2.items():
                print(key)
                if isinstance(dataset, sidpy.Dataset):
                    datasets[key_name + f'_{index:03}'] = dataset
                    index += 1
                else:
                    print(key)
                    datasets[key] = dataset
            index -= 1
    else:
        return None

    return f'{key_name}_{index:03}'

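# Usage sketch for add_dataset_from_file (editor's illustration; file names hypothetical):
datasets = open_file('survey_image.dm3')
key = add_dataset_from_file(datasets, 'spectrum.dm3', key_name='Log')
print(key)             # e.g. 'Log_000'
print(list(datasets))  # the original channel(s) plus the newly added entry
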
# ##
# Crystal Structure Read and Write
# ##
def read_poscar(file_name):
    """
    Open a POSCAR file from VASP

    Parameters
    ----------
    file_name: str
        name of the POSCAR file to open

    Returns
    -------
    crystal: ase.Atoms
        crystal structure in ase format
    """

    # use ase package to read file
    base = os.path.basename(file_name)
    base_name = os.path.splitext(base)[0]
    crystal = ase.io.read(file_name, format='vasp', parallel=False)

    # make dictionary and plot structure (not essential for further notebook)
    crystal.info = {'title': base_name}
    return crystal

def read_cif(file_name, verbose=False):
    """
    Open a cif file

    Parameters
    ----------
    file_name: str
        name of the cif file to open
    verbose: bool
        if True, the chemical formula of the opened structure is printed

    Returns
    -------
    crystal: ase.Atoms
        crystal structure in ase format
    """

    base = os.path.basename(file_name)
    base_name = os.path.splitext(base)[0]
    crystal = ase.io.read(file_name, format='cif', store_tags=True, parallel=False)

    # make dictionary and plot structure (not essential for further notebook)
    if crystal.info is None:
        crystal.info = {'title': base_name}
    crystal.info.update({'title': base_name})
    if verbose:
        print('Opened cif file for ', crystal.get_chemical_formula())

    return crystal

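# Usage sketch for the structure readers above (editor's illustration; file name hypothetical):
crystal = read_cif('SrTiO3.cif', verbose=True)
print(crystal.cell.lengths())   # lattice parameters, in Angstrom since the 2022 change to ase
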
def h5_add_crystal_structure(h5_file, input_structure):
    """Write crystal structure to NSID file"""

    if isinstance(input_structure, ase.Atoms):
        crystal_tags = crystal_tools.get_dictionary(input_structure)
        if crystal_tags['metadata'] == {}:
            crystal_tags['metadata'] = {'title': input_structure.get_chemical_formula()}
    elif isinstance(input_structure, dict):
        crystal_tags = input_structure
    else:
        raise TypeError('Need a dictionary or an ase.Atoms object with ase installed')

    structure_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Structure_')

    for key, item in crystal_tags.items():
        if not isinstance(item, dict):
            structure_group[key] = item

    if 'base' in crystal_tags:
        structure_group['relative_positions'] = crystal_tags['base']
    if 'title' in crystal_tags:
        structure_group['title'] = str(crystal_tags['title'])
        structure_group['_' + crystal_tags['title']] = str(crystal_tags['title'])

    # ToDo: Save all of info dictionary
    if 'metadata' in crystal_tags:
        structure_group.create_group('metadata')
        sidpy.hdf.hdf_utils.write_simple_attrs(structure_group['metadata'],
                                               crystal_tags['metadata'])

    h5_file.file.flush()
    return structure_group

def h5_add_to_structure(structure_group, crystal_tags):
    """Add dictionary entries to a structure group"""

    for key in crystal_tags:
        if key in structure_group.keys():
            print(key, ' not written; use new name')
        else:
            structure_group[key] = crystal_tags[key]

def h5_get_crystal_structure(structure_group):
    """Read crystal structure from NSID file

    Any additional information will be read as a dictionary into the
    info attribute of the ase.Atoms object

    Parameters
    ----------
    structure_group: h5py.Group
        location in the hdf5 file where the structure information is stored

    Returns
    -------
    atoms: ase.Atoms object
        crystal structure in ase format
    """

    crystal_tags = {'unit_cell': structure_group['unit_cell'][()],
                    'base': structure_group['relative_positions'][()],
                    'title': structure_group['title'][()]}
    if '2D' in structure_group:
        crystal_tags['2D'] = structure_group['2D'][()]
    elements = structure_group['elements'][()]
    crystal_tags['elements'] = []
    for e in elements:
        crystal_tags['elements'].append(e.astype(str, copy=False))

    atoms = crystal_tools.atoms_from_dictionary(crystal_tags)
    if 'metadata' in structure_group:
        atoms.info = sidpy.hdf.hdf_utils.h5_group_to_dict(structure_group)

    if 'zone_axis' in structure_group:
        atoms.info['experiment'] = {'zone_axis': structure_group['zone_axis'][()]}
    # ToDo: Read all of info dictionary
    return atoms
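# Usage sketch for reading a structure back (editor's illustration; assumes a group
# 'Structure_000' was written earlier by h5_add_crystal_structure into 'data.hf5'):
with h5py.File('data.hf5', mode='r') as h5_file:
    atoms = h5_get_crystal_structure(h5_file['Structure_000'])
    print(atoms.get_chemical_formula())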