pyTEMlib 0.2025.4.2__py3-none-any.whl → 0.2025.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of pyTEMlib has been flagged as potentially problematic.

Files changed (94)
  1. build/lib/pyTEMlib/__init__.py +33 -0
  2. build/lib/pyTEMlib/animation.py +640 -0
  3. build/lib/pyTEMlib/atom_tools.py +238 -0
  4. build/lib/pyTEMlib/config_dir.py +31 -0
  5. build/lib/pyTEMlib/crystal_tools.py +1219 -0
  6. build/lib/pyTEMlib/diffraction_plot.py +756 -0
  7. build/lib/pyTEMlib/dynamic_scattering.py +293 -0
  8. build/lib/pyTEMlib/eds_tools.py +826 -0
  9. build/lib/pyTEMlib/eds_xsections.py +432 -0
  10. build/lib/pyTEMlib/eels_tools/__init__.py +44 -0
  11. build/lib/pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  12. build/lib/pyTEMlib/eels_tools/eels_database.py +134 -0
  13. build/lib/pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  14. build/lib/pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  15. build/lib/pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  16. build/lib/pyTEMlib/file_reader.py +274 -0
  17. build/lib/pyTEMlib/file_tools.py +811 -0
  18. build/lib/pyTEMlib/get_bote_salvat.py +69 -0
  19. build/lib/pyTEMlib/graph_tools.py +1153 -0
  20. build/lib/pyTEMlib/graph_viz.py +599 -0
  21. build/lib/pyTEMlib/image/__init__.py +37 -0
  22. build/lib/pyTEMlib/image/image_atoms.py +270 -0
  23. build/lib/pyTEMlib/image/image_clean.py +197 -0
  24. build/lib/pyTEMlib/image/image_distortion.py +299 -0
  25. build/lib/pyTEMlib/image/image_fft.py +277 -0
  26. build/lib/pyTEMlib/image/image_graph.py +926 -0
  27. build/lib/pyTEMlib/image/image_registration.py +316 -0
  28. build/lib/pyTEMlib/image/image_utilities.py +309 -0
  29. build/lib/pyTEMlib/image/image_window.py +421 -0
  30. build/lib/pyTEMlib/image_tools.py +699 -0
  31. build/lib/pyTEMlib/interactive_image.py +1 -0
  32. build/lib/pyTEMlib/kinematic_scattering.py +1196 -0
  33. build/lib/pyTEMlib/microscope.py +61 -0
  34. build/lib/pyTEMlib/probe_tools.py +906 -0
  35. build/lib/pyTEMlib/sidpy_tools.py +153 -0
  36. build/lib/pyTEMlib/simulation_tools.py +104 -0
  37. build/lib/pyTEMlib/test.py +437 -0
  38. build/lib/pyTEMlib/utilities.py +314 -0
  39. build/lib/pyTEMlib/version.py +5 -0
  40. build/lib/pyTEMlib/xrpa_x_sections.py +20976 -0
  41. pyTEMlib/__init__.py +25 -3
  42. pyTEMlib/animation.py +31 -22
  43. pyTEMlib/atom_tools.py +29 -34
  44. pyTEMlib/config_dir.py +2 -28
  45. pyTEMlib/crystal_tools.py +129 -165
  46. pyTEMlib/eds_tools.py +559 -342
  47. pyTEMlib/eds_xsections.py +432 -0
  48. pyTEMlib/eels_tools/__init__.py +44 -0
  49. pyTEMlib/eels_tools/core_loss_tools.py +751 -0
  50. pyTEMlib/eels_tools/eels_database.py +134 -0
  51. pyTEMlib/eels_tools/low_loss_tools.py +655 -0
  52. pyTEMlib/eels_tools/peak_fit_tools.py +175 -0
  53. pyTEMlib/eels_tools/zero_loss_tools.py +264 -0
  54. pyTEMlib/file_reader.py +274 -0
  55. pyTEMlib/file_tools.py +260 -1130
  56. pyTEMlib/get_bote_salvat.py +69 -0
  57. pyTEMlib/graph_tools.py +101 -174
  58. pyTEMlib/graph_viz.py +150 -0
  59. pyTEMlib/image/__init__.py +37 -0
  60. pyTEMlib/image/image_atoms.py +270 -0
  61. pyTEMlib/image/image_clean.py +197 -0
  62. pyTEMlib/image/image_distortion.py +299 -0
  63. pyTEMlib/image/image_fft.py +277 -0
  64. pyTEMlib/image/image_graph.py +926 -0
  65. pyTEMlib/image/image_registration.py +316 -0
  66. pyTEMlib/image/image_utilities.py +309 -0
  67. pyTEMlib/image/image_window.py +421 -0
  68. pyTEMlib/image_tools.py +154 -928
  69. pyTEMlib/kinematic_scattering.py +1 -1
  70. pyTEMlib/probe_tools.py +1 -1
  71. pyTEMlib/test.py +437 -0
  72. pyTEMlib/utilities.py +314 -0
  73. pyTEMlib/version.py +2 -3
  74. pyTEMlib/xrpa_x_sections.py +14 -10
  75. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/METADATA +13 -16
  76. pytemlib-0.2025.9.1.dist-info/RECORD +86 -0
  77. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/WHEEL +1 -1
  78. pytemlib-0.2025.9.1.dist-info/top_level.txt +6 -0
  79. pyTEMlib/core_loss_widget.py +0 -721
  80. pyTEMlib/eels_dialog.py +0 -754
  81. pyTEMlib/eels_dialog_utilities.py +0 -1199
  82. pyTEMlib/eels_tools.py +0 -2359
  83. pyTEMlib/file_tools_qt.py +0 -193
  84. pyTEMlib/image_dialog.py +0 -158
  85. pyTEMlib/image_dlg.py +0 -146
  86. pyTEMlib/info_widget.py +0 -1086
  87. pyTEMlib/info_widget3.py +0 -1120
  88. pyTEMlib/low_loss_widget.py +0 -479
  89. pyTEMlib/peak_dialog.py +0 -1129
  90. pyTEMlib/peak_dlg.py +0 -286
  91. pytemlib-0.2025.4.2.dist-info/RECORD +0 -38
  92. pytemlib-0.2025.4.2.dist-info/top_level.txt +0 -1
  93. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/entry_points.txt +0 -0
  94. {pytemlib-0.2025.4.2.dist-info → pytemlib-0.2025.9.1.dist-info}/licenses/LICENSE +0 -0
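
Structural changes dominate this release: the Qt-based dialog modules (core_loss_widget, eels_dialog, info_widget, peak_dialog, and friends) are deleted, the monolithic eels_tools.py is split into an eels_tools/ subpackage, a new image/ subpackage collects the imaging routines, and file_tools.py shrinks substantially by delegating its widgets to sidpy. A minimal sketch of the post-upgrade loading flow (the file path is hypothetical; 'Channel_000' is the default key produced by open_file):

    import pyTEMlib.file_tools as ft

    # open_file() now requires an explicit path; the Qt file-dialog fallback is gone
    datasets = ft.open_file('experiment/STEM_image.dm3')  # dict of sidpy.Dataset objects
    main_dataset = datasets['Channel_000']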
pyTEMlib/file_tools.py CHANGED
@@ -8,11 +8,12 @@
  
  ##################################
  """
+ import typing
  
- import numpy as np
- import h5py
  import os
  import pickle
+ import numpy as np
+ import h5py
  
  # For structure files of various flavor for instance POSCAR and other theory packages
  import ase.io
@@ -23,36 +24,23 @@ import ase.io
  import SciFiReaders
  import pyNSID
  import sidpy
- import sidpy
- import xml.etree.ElementTree as ET
- import collections
- import ipywidgets as widgets
- from IPython.display import display
+ import ipywidgets
+ import IPython
  
  # =============================================
  # Include pyTEMlib libraries #
  # =============================================
- import pyTEMlib.crystal_tools
- from pyTEMlib.config_dir import config_path
- from pyTEMlib.sidpy_tools import *
-
- Qt_available = True
- try:
-     from PyQt5 import QtCore, QtWidgets, QtGui
- except ModuleNotFoundError:
-     print('Qt dialogs are not available')
-     Qt_available = False
-
+ from . import crystal_tools
+ from .config_dir import config_path
+ from .file_reader import adorned_to_sidpy, read_old_h5group
+ from .version import __version__
  Dimension = sidpy.Dimension
  
- __version__ = '2024.9.14'
-
- from traitlets import Unicode, Bool, validate, TraitError
- import ipywidgets
+ __version__ = '2025.8.07'
  
+ ChooseDataset = sidpy.ChooseDataset
  
- @ipywidgets.register
- class FileWidget2(ipywidgets.DOMWidget):
+ class FileWidget(sidpy.FileWidget):
      """Widget to select directories or widgets from a list
  
      Works in google colab.
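
The import block above replaces absolute pyTEMlib imports with relative ones, drops the PyQt5/traitlets machinery, and re-exports sidpy's dataset chooser under the old name (note the module now both imports __version__ from .version and immediately reassigns it to '2025.8.07'). A sketch of what the alias means for callers; sidpy's widget may expose slightly different attributes than the class it replaces:

    import pyTEMlib.file_tools as ft

    chooser = ft.ChooseDataset(datasets)  # now simply sidpy.ChooseDataset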
@@ -78,147 +66,46 @@ class FileWidget2(ipywidgets.DOMWidget):
      >>drive.mount("/content/drive")
      >>file_list = pyTEMlib.file_tools.FileWidget()
      next code cell:
-     >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)
+     >>datasets = file_list.datasets
+     >>dataset = file_list.selected_dataset
  
      """
-
-     def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
-         self.save_path = False
-         self.dir_dictionary = {}
-         self.dir_list = ['.', '..']
-         self.display_list = ['.', '..']
-         self.sum_frames = sum_frames
-
-         self.dir_name = '.'
-         if dir_name is None:
-             self.dir_name = get_last_path()
-             self.save_path = True
-         elif os.path.isdir(dir_name):
-             self.dir_name = dir_name
-
-         self.get_directory(self.dir_name)
-         self.dir_list = ['.']
-         self.extensions = extension
-         self.file_name = ''
-         self.datasets = {}
-         self.dataset = None
-
-         self.select_files = widgets.Select(
-             options=self.dir_list,
-             value=self.dir_list[0],
-             description='Select file:',
-             disabled=False,
-             rows=10,
-             layout=widgets.Layout(width='70%')
-         )
-         self.path_choice = widgets.Dropdown(options=['None'],
-                                             value='None',
-                                             description='directory:',
-                                             disabled=False,
-                                             layout=widgets.Layout(width='90%'))
-
-
-
-         self.set_options()
-         ui = widgets.VBox([self.path_choice, self.select_files])
-         display(ui)
-
-         self.select_files.observe(self.get_file_name, names='value')
-         self.path_choice.observe(self.set_dir, names='value')
-
-
-
-     def get_directory(self, directory=None):
-         self.dir_name = directory
-         self.dir_dictionary = {}
-         self.dir_list = []
-         self.dir_list = ['.', '..'] + os.listdir(directory)
-
-     def set_dir(self, value=0):
-         self.dir_name = self.path_choice.value
-         self.select_files.index = 0
-         self.set_options()
-
-
-     def set_options(self):
-         self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
-         dir_list = os.listdir(self.dir_name)
-         file_dict = update_directory_list(self.dir_name)
-
-         sort = np.argsort(file_dict['directory_list'])
-         self.dir_list = ['.', '..']
-         self.display_list = ['.', '..']
-         for j in sort:
-             self.display_list.append(f" * {file_dict['directory_list'][j]}")
-             self.dir_list.append(file_dict['directory_list'][j])
-
-         sort = np.argsort(file_dict['display_file_list'])
-
-         for i, j in enumerate(sort):
-             if '--' in dir_list[j]:
-                 self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
-             else:
-                 self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
-             self.dir_list.append(file_dict['file_list'][j])
-
-         self.dir_label = os.path.split(self.dir_name)[-1] + ':'
-         self.select_files.options = self.display_list
-
-         path = self.dir_name
-         old_path = ' '
-         path_list = []
-         while path != old_path:
-             path_list.append(path)
-             old_path = path
-             path = os.path.split(path)[0]
-         self.path_choice.options = path_list
-         self.path_choice.value = path_list[0]
-
-     def get_file_name(self, b):
-
-         if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
-             self.set_options()
-
-         elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
-             self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])
-
- class FileWidget3(FileWidget2):
      def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
          if dir_name is None:
              dir_name = get_last_path()
-         self.save_path = True
+             self.save_path = True
          super().__init__(dir_name=dir_name, extension=extension, sum_frames=sum_frames)
-
-         select_button = widgets.Button(description='Select Main',
-                                        layout=widgets.Layout(width='auto', grid_area='header'),
-                                        style=widgets.ButtonStyle(button_color='lightblue'))
-
-         add_button = widgets.Button(description='Add',
-                                     layout=widgets.Layout(width='auto', grid_area='header'),
-                                     style=widgets.ButtonStyle(button_color='lightblue'))
-
+         select_button = ipywidgets.Button(description='Select Main',
+                                           layout=ipywidgets.Layout(width='auto', grid_area='header'),
+                                           style=ipywidgets.ButtonStyle(button_color='lightblue'))
+
+         add_button = ipywidgets.Button(description='Add',
+                                        layout=ipywidgets.Layout(width='auto', grid_area='header'),
+                                        style=ipywidgets.ButtonStyle(button_color='lightblue'))
          self.dataset_list = ['None']
-         self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,
+         self.selected_dataset = None
+         self.datasets = {}
+         self.selected_key = ''
+         self.loaded_datasets = ipywidgets.Dropdown(options=self.dataset_list,
                                                  value=self.dataset_list[0],
                                                  description='loaded datasets:',
                                                  disabled=False)
-
-         ui = widgets.HBox([select_button, add_button, self.loaded_datasets])
-         display(ui)
-
-
+
+         ui = ipywidgets.HBox([select_button, add_button, self.loaded_datasets])
+         IPython.display.display(ui)
          select_button.on_click(self.select_main)
         add_button.on_click(self.add_dataset)
         self.loaded_datasets.observe(self.select_dataset)
-
  
-     def select_dataset(self, value=0):
+     def select_dataset(self, value: int = 0):
+         """Select a dataset from the dropdown."""
          key = self.loaded_datasets.value.split(':')[0]
          if key != 'None':
              self.selected_dataset = self.datasets[key]
              self.selected_key = key
  
-     def select_main(self, value=0):
+     def select_main(self, value: int = 0):
+         """Select the main dataset."""
          self.datasets = {}
          self.dataset_list = []
          self.datasets = open_file(self.file_name, sum_frames=self.sum_frames)
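
The surviving FileWidget keeps the two-cell workflow described in its docstring. A minimal sketch:

    # cell 1: browse, then press 'Select Main'
    import pyTEMlib.file_tools as ft
    file_list = ft.FileWidget()
    # cell 2:
    datasets = file_list.datasets
    dataset = file_list.selected_dataset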
@@ -230,257 +117,16 @@ class FileWidget3(FileWidget2):
          self.dataset = self.datasets[list(self.datasets.keys())[0]]
          self.selected_dataset = self.dataset
  
-     def add_dataset(self, value=0):
-         key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')
-         self.dataset_list.append(f'{key}: {self.datasets[key].title}')
-         self.loaded_datasets.options = self.dataset_list
-         self.loaded_datasets.value = self.dataset_list[-1]
-
-
- @ipywidgets.register
- class FileWidget(ipywidgets.DOMWidget):
-     """Widget to select directories or widgets from a list
-
-     Works in google colab.
-     The widget converts the name of the nion file to the one in Nion's swift software,
-     because it is otherwise incomprehensible
-
-     Attributes
-     ----------
-     dir_name: str
-         name of starting directory
-     extension: list of str
-         extensions of files to be listed in widget
-
-     Methods
-     -------
-     get_directory
-     set_options
-     get_file_name
-
-     Example
-     -------
-     >>from google.colab import drive
-     >>drive.mount("/content/drive")
-     >>file_list = pyTEMlib.file_tools.FileWidget()
-     next code cell:
-     >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)
-
-     """
-
-     def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
-         self.save_path = False
-         self.dir_dictionary = {}
-         self.dir_list = ['.', '..']
-         self.display_list = ['.', '..']
-         self.sum_frames = sum_frames
-
-         self.dir_name = '.'
-         if dir_name is None:
-             self.dir_name = get_last_path()
-             self.save_path = True
-         elif os.path.isdir(dir_name):
-             self.dir_name = dir_name
-
-         self.get_directory(self.dir_name)
-         self.dir_list = ['.']
-         self.extensions = extension
-         self.file_name = ''
-         self.datasets = {}
-         self.dataset = None
-
-         self.select_files = widgets.Select(
-             options=self.dir_list,
-             value=self.dir_list[0],
-             description='Select file:',
-             disabled=False,
-             rows=10,
-             layout=widgets.Layout(width='70%')
-         )
-
-         select_button = widgets.Button(description='Select Main',
-                                        layout=widgets.Layout(width='auto', grid_area='header'),
-                                        style=widgets.ButtonStyle(button_color='lightblue'))
-
-         add_button = widgets.Button(description='Add',
-                                     layout=widgets.Layout(width='auto', grid_area='header'),
-                                     style=widgets.ButtonStyle(button_color='lightblue'))
-
-         self.path_choice = widgets.Dropdown(options=['None'],
-                                             value='None',
-                                             description='directory:',
-                                             disabled=False,
-                                             layout=widgets.Layout(width='90%'))
-         self.dataset_list = ['None']
-         self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,
-                                                 value=self.dataset_list[0],
-                                                 description='loaded datasets:',
-                                                 disabled=False)
-
-         self.set_options()
-         ui = widgets.VBox([self.path_choice, self.select_files, widgets.HBox([select_button, add_button,
-                                                                               self.loaded_datasets])])
-         display(ui)
-
-         self.select_files.observe(self.get_file_name, names='value')
-         self.path_choice.observe(self.set_dir, names='value')
-
-         select_button.on_click(self.select_main)
-         add_button.on_click(self.add_dataset)
-         self.loaded_datasets.observe(self.select_dataset)
-
-     def select_main(self, value=0):
-         self.datasets = {}
-         #self.loaded_datasets.value = self.dataset_list[0]
-         self.dataset_list = []
-         self.datasets = open_file(self.file_name, sum_frames=self.sum_frames)
-         self.dataset_list = []
-         for key in self.datasets.keys():
-             self.dataset_list.append(f'{key}: {self.datasets[key].title}')
-         self.loaded_datasets.options = self.dataset_list
-         self.loaded_datasets.value = self.dataset_list[0]
-         self.debug = 5
-         self.dataset = self.datasets[list(self.datasets.keys())[0]]
-         self.debug = 6
-         self.selected_dataset = self.dataset
-
-     def add_dataset(self, value=0):
+     def add_dataset(self, value: int = 0):
+         """Add another dataset to the list of loaded datasets."""
          key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')
          self.dataset_list.append(f'{key}: {self.datasets[key].title}')
          self.loaded_datasets.options = self.dataset_list
          self.loaded_datasets.value = self.dataset_list[-1]
  
-     def get_directory(self, directory=None):
-         self.dir_name = directory
-         self.dir_dictionary = {}
-         self.dir_list = []
-         self.dir_list = ['.', '..'] + os.listdir(directory)
-
-     def set_dir(self, value=0):
-         self.dir_name = self.path_choice.value
-         self.select_files.index = 0
-         self.set_options()
-
-     def select_dataset(self, value=0):
-
-         key = self.loaded_datasets.value.split(':')[0]
-         if key != 'None':
-             self.selected_dataset = self.datasets[key]
-             self.selected_key = key
-
-     def set_options(self):
-         self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
-         dir_list = os.listdir(self.dir_name)
-         file_dict = update_directory_list(self.dir_name)
-
-         sort = np.argsort(file_dict['directory_list'])
-         self.dir_list = ['.', '..']
-         self.display_list = ['.', '..']
-         for j in sort:
-             self.display_list.append(f" * {file_dict['directory_list'][j]}")
-             self.dir_list.append(file_dict['directory_list'][j])
-
-         sort = np.argsort(file_dict['display_file_list'])
-
-         for i, j in enumerate(sort):
-             if '--' in dir_list[j]:
-                 self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
-             else:
-                 self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
-             self.dir_list.append(file_dict['file_list'][j])
-
-         self.dir_label = os.path.split(self.dir_name)[-1] + ':'
-         self.select_files.options = self.display_list
-
-         path = self.dir_name
-         old_path = ' '
-         path_list = []
-         while path != old_path:
-             path_list.append(path)
-             old_path = path
-             path = os.path.split(path)[0]
-         self.path_choice.options = path_list
-         self.path_choice.value = path_list[0]
-
-     def get_file_name(self, b):
-
-         if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
-             self.set_options()
-
-         elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
-             self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])
-
-
- class ChooseDataset(object):
-     """Widget to select dataset object """
-
-     def __init__(self, input_object, show_dialog=True):
-         self.datasets = None
-         if isinstance(input_object, sidpy.Dataset):
-             if isinstance(input_object.h5_dataset, h5py.Dataset):
-                 self.current_channel = input_object.h5_dataset.parent
-         elif isinstance(input_object, h5py.Group):
-             self.current_channel = input_object
-         elif isinstance(input_object, h5py.Dataset):
-             self.current_channel = input_object.parent
-         elif isinstance(input_object, dict):
-             self.datasets = input_object
-         else:
-             raise ValueError('Need hdf5 group or sidpy Dataset to determine image choices')
-         self.dataset_names = []
-         self.dataset_list = []
-         self.dataset_type = None
-         self.dataset = None
-         if not isinstance(self.datasets, dict):
-             self.reader = SciFiReaders.NSIDReader(self.current_channel.file.filename)
-         else:
-             self.reader = None
-         self.get_dataset_list()
-         if len(self.dataset_list) < 1:
-             self.dataset_list = ['None']
-         self.select_image = widgets.Dropdown(options=self.dataset_list,
-                                              value=self.dataset_list[0],
-                                              description='select dataset:',
-                                              disabled=False)
-         if show_dialog:
-             display(self.select_image)
-
-         self.select_image.observe(self.set_dataset, names='value')
-         self.set_dataset(0)
-         self.select_image.index = (len(self.dataset_names) - 1)
-
-     def get_dataset_list(self):
-         """ Get by Log number sorted list of datasets"""
-         dataset_list = []
-         if not isinstance(self.datasets, dict):
-             dataset_list = self.reader.read()
-             self.datasets = {}
-             for dataset in dataset_list:
-                 self.datasets[dataset.title] = dataset
-         order = []
-         keys = []
-         for title, dset in self.datasets.items():
-             if isinstance(dset, sidpy.Dataset):
-                 if self.dataset_type is None or dset.data_type == self.data_type:
-                     if 'Log' in title:
-                         order.append(2)
-                     else:
-                         order.append(0)
-                     keys.append(title)
-         for index in np.argsort(order):
-             self.dataset_names.append(keys[index])
-             self.dataset_list.append(keys[index] + ': ' + self.datasets[keys[index]].title)
-
-     def set_dataset(self, b):
-         index = self.select_image.index
-         if index < len(self.dataset_names):
-             self.key = self.dataset_names[index]
-             self.dataset = self.datasets[self.key]
-             self.dataset.title = self.dataset.title.split('/')[-1]
-             self.dataset.title = self.dataset.title.split('/')[-1]
-
  
- def add_to_dict(file_dict, name):
+ def add_to_dict(file_dict: dict, name: str):
+     """Add a file to the dictionary with its metadata."""
      full_name = os.path.join(file_dict['directory'], name)
      basename, extension = os.path.splitext(name)
      size = os.path.getsize(full_name) * 2 ** -20
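
With this hunk, FileWidget2, FileWidget3, the old FileWidget, and the ChooseDataset class are all gone; one FileWidget built on sidpy.FileWidget remains, and ChooseDataset is only an alias for sidpy's implementation. The 'Select Main' and 'Add' buttons map onto plain method calls, a sketch assuming a FileWidget instance named file_list with a file already selected:

    file_list.select_main()   # loads file_list.file_name via open_file()
    file_list.add_dataset()   # appends another file under a 'Channel_###' key
    print(list(file_list.datasets.keys()))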
@@ -494,17 +140,19 @@ def add_to_dict(file_dict, name):
          try:
              reader = SciFiReaders.NionReader(full_name)
              dataset_nion = reader.read()
-             display_name = dataset_nion.title
+             key = list(dataset_nion.keys())[0]
+             display_name = dataset_nion[key].title
              display_file_list = f" {display_name}{extension} - {size:.1f} MB"
          except:
              display_file_list = f" {name} - {size:.1f} MB"
      else:
          display_file_list = f' {name} - {size:.1f} MB'
-     file_dict[name] = {'display_string': display_file_list, 'basename': basename, 'extension': extension,
-                        'size': size, 'display_name': display_name}
+     file_dict[name] = {'display_string': display_file_list, 'basename': basename,
+                        'extension': extension, 'size': size, 'display_name': display_name}
  
  
- def update_directory_list(directory_name):
+ def update_directory_list(directory_name: str) -> dict:
+     """Update the directory list and return the file dictionary."""
      dir_list = os.listdir(directory_name)
  
      if '.pyTEMlib.files.pkl' in dir_list:
@@ -536,7 +184,8 @@ def update_directory_list(directory_name):
          save_pickle = False
  
      for name in file_dict.keys():
-         if name not in dir_list and name not in ['directory', 'file_list', 'directory_list', 'display_file_list']:
+         if name not in dir_list and name not in ['directory', 'file_list',
+                                                  'directory_list', 'display_file_list']:
              remove_item.append(name)
          else:
              if 'extension' in file_dict[name]:
@@ -554,12 +203,11 @@
  # General Open and Save Methods
  ####
  
- def get_last_path():
+ def get_last_path() -> str:
      """Returns the path of the file last opened"""
      try:
-         fp = open(config_path + '\\path.txt', 'r')
-         path = fp.read()
-         fp.close()
+         with open(config_path + '\\path.txt', 'r', encoding='utf-8') as file:
+             path = file.read()
      except IOError:
          path = ''
  
@@ -571,164 +219,45 @@ def get_last_path():
      return path
  
  
- def save_path(filename):
+ def save_path(filename: str) -> str:
      """Save path of last opened file"""
  
      if len(filename) > 1:
-         fp = open(config_path + '\\path.txt', 'w')
-         path, fname = os.path.split(filename)
-         fp.write(path)
-         fp.close()
+         with open(config_path + '\\path.txt', 'w', encoding='utf-8') as file:
+             path, _ = os.path.split(filename)
+             file.write(path)
      else:
          path = '.'
      return path
  
  
- if Qt_available:
-     def get_qt_app():
-         """
-         will start QT Application if not running yet
-
-         :returns: QApplication
-
-         """
-
-         # start qt event loop
-         _instance = QtWidgets.QApplication.instance()
-         if not _instance:
-             # print('not_instance')
-             _instance = QtWidgets.QApplication([])
-
-         return _instance
-
-
-     def open_file_dialog_qt(file_types=None):  # , multiple_files=False):
-         """Opens a File dialog which is used in open_file() function
-
-         This function uses pyQt5.
-         The app of the Gui has to be running for QT. Tkinter does not run on Macs at this point in time.
-         In jupyter notebooks use %gui Qt early in the notebook.
-
-         The file looks first for a path.txt file for the last directory you used.
-
-         Parameters
-         ----------
-         file_types : string
-             file type filter in the form of '*.hf5'
-
-
-         Returns
-         -------
-         filename : string
-             full filename with absolute path and extension as a string
-
-         Example
-         -------
-         >> import file_tools as ft
-         >> filename = ft.openfile_dialog()
-         >> print(filename)
-
-         """
-         """will start QT Application if not running yet and returns QApplication """
-
-         # determine file types by extension
-         if file_types is None:
-             file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
-                          'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
-         elif file_types == 'pyNSID':
-             file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \
-                          'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
-
-         # file_types = [("TEM files",["*.dm*","*.hf*","*.ndata" ]),("pyNSID files","*.hf5"),("DM files","*.dm*"),
-         #               ("Nion files",["*.h5","*.ndata"]),("all files","*.*")]
-
-         # Determine last path used
-         path = get_last_path()
-
-         if Qt_available:
-             _ = get_qt_app()
-         filename = sidpy.io.interface_utils.openfile_dialog_QT(file_types=file_types, file_path=path)
-         save_path(filename)
-         return filename
-
-
-     def save_file_dialog_qt(file_types=None):  # , multiple_files=False):
-         """Opens a File dialog which is used in open_file() function
-
-         This function uses pyQt5.
-         The app of the Gui has to be running for QT. Tkinter does not run on Macs at this point in time.
-         In jupyter notebooks use %gui Qt early in the notebook.
-
-         The file looks first for a path.txt file for the last directory you used.
-
-         Parameters
-         ----------
-         file_types : string
-             file type filter in the form of '*.hf5'
-
-
-         Returns
-         -------
-         filename : string
-             full filename with absolute path and extension as a string
-
-         Example
-         -------
-         >> import file_tools as ft
-         >> filename = ft.openfile_dialog()
-         >> print(filename)
-
-         """
-         """will start QT Application if not running yet and returns QApplication """
-
-         # determine file types by extension
-         if file_types is None:
-             file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \
-                          'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
-         elif file_types == 'TEM':
-             file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
-                          'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
-
-         # Determine last path used
-         path = get_last_path()
-
-         if Qt_available:
-             _ = get_qt_app()
-         filename = sidpy.io.interface_utils.savefile_dialog(file_types=file_types, file_path=path)
-         save_path(filename)
-         return filename
-
-
- def save_dataset(dataset, filename=None, qt=False, h5_group=None):
+ def save_dataset(dataset, filename, h5_group=None):
      """ Saves a dataset to a file in pyNSID format
      Parameters
      ----------
      dataset: sidpy.Dataset
          the data
      filename: str
-         name of file to be opened, if filename is None, a QT file dialog will try to open
+         name of file to be opened
      h5_group: hd5py.Group
          not used yet
      """
-     if filename is None or qt==True:
-         filename = save_file_dialog_qt()
      h5_filename = get_h5_filename(filename)
      h5_file = h5py.File(h5_filename, mode='a')
-     path, file_name = os.path.split(filename)
-     basename, _ = os.path.splitext(file_name)
-
      if isinstance(dataset, dict):
          h5_group = save_dataset_dictionary(h5_file, dataset)
          return h5_group
-
-     elif isinstance(dataset, sidpy.Dataset):
+     if isinstance(dataset, sidpy.Dataset):
          h5_dataset = save_single_dataset(h5_file, dataset, h5_group=h5_group)
          return h5_dataset.parent
-     else:
-         raise TypeError('Only sidpy.datasets or dictionaries can be saved with pyTEMlib')
+
+     raise TypeError('Only sidpy.datasets or dictionaries can be saved with pyTEMlib')
  
  
  def save_single_dataset(h5_file, dataset, h5_group=None):
+     """
+     Saves a single sidpy.Dataset to an HDF5 file.
+     """
      if h5_group is None:
          h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
          h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_measurement_group, 'Channel_')
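
Roughly 170 lines of Qt plumbing (get_qt_app, open_file_dialog_qt, save_file_dialog_qt) disappear in this hunk, and save_dataset loses its qt flag and its None-filename dialog path. Saving is now explicit; a sketch with a hypothetical output name:

    import pyTEMlib.file_tools as ft

    # works for a single sidpy.Dataset or a dict of datasets
    h5_group = ft.save_dataset(datasets, filename='results.hf5')

Note that get_last_path() and save_path() still join config_path with a literal '\\', which assumes a Windows-style configuration directory.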
@@ -752,7 +281,8 @@ def save_single_dataset(h5_file, dataset, h5_group=None):
      return h5_dataset
  
  
- def save_dataset_dictionary(h5_file, datasets):
+ def save_dataset_dictionary(h5_file: h5py.File, datasets: dict) -> h5py.Group:
+     """Saves a dictionary of datasets to an HDF5 file."""
      h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
      for key, dataset in datasets.items():
          if key[-1] == '/':
@@ -770,6 +300,9 @@
  
  
  def h5_group_to_dict(group, group_dict={}):
+     """
+     Converts an h5py group to a python dictionary.
+     """
      if not isinstance(group, h5py.Group):
          raise TypeError('we need a h5py group to read from')
      if not isinstance(group_dict, dict):
@@ -781,59 +314,51 @@
      return group_dict
  
  
- def read_annotation(image):
+ def read_dm_annotation(image: sidpy.Dataset) -> typing.Dict[str, typing.Any]:
+     """
+     Reads annotations from a sidpy.Dataset that originated from a dm3 file.
+     """
      if 'MAGE' not in image.data_type.name:
          return {}
      scale_x = np.abs(image.x[1]-image.x[0])
      scale_y = np.abs(image.y[1]-image.y[0])
      rec_scale = np.array([scale_x, scale_y,scale_x, scale_y])
-     if 'DocumentObjectList' not in image.original_metadata:
-         return {}
-     if '0' not in image.original_metadata['DocumentObjectList']:
-         return {}
-     annotations = {}
-     tags = image.original_metadata['DocumentObjectList']['0']
+     annotations = {}
+     tags = image.original_metadata.get('DocumentObjectList', {}).get('0', {}).get('AnnotationGroupList', {})
+
+     if not tags:
+         return annotations
+
      for key in tags:
-         if 'AnnotationGroupList' in key:
-             an_tags = tags[key]
-             for key2 in an_tags:
-                 if isinstance(an_tags[key2], dict):
-                     if an_tags[key2]['AnnotationType'] == 13: #type 'text'
-                         annotations[key2] = {'type': 'text'}
-                         if 'Label' in an_tags:
-                             annotations[key2]['label'] = an_tags['Label']
-                         rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
-                         annotations[key2]['position'] = [rect[1],rect[0]]
-                         annotations[key2]['text'] = an_tags['Text']
-
-                     elif an_tags[key2]['AnnotationType']==6:
-                         annotations[key2] = {'type': 'circle'}
-                         if 'Label' in an_tags:
-                             annotations[key2]['label'] = an_tags['Label']
-                         rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
-
-                         annotations[key2]['radius'] =rect[3]-rect[1]
-                         annotations[key2]['position'] = [rect[1],rect[0]]
-
-                     elif an_tags[key2]['AnnotationType'] == 23:
-                         annotations[key2] = {'type': 'spectral_image'}
-                         if 'Label' in an_tags[key2]:
-                             annotations[key2]['label'] = an_tags[key2]['Label']
-                         rect = np.array(an_tags[key2]['Rectangle']) * rec_scale
-
-                         annotations[key2]['width'] =rect[3]-rect[1]
-                         annotations[key2]['height'] =rect[2]-rect[0]
-                         annotations[key2]['position'] = [rect[1],rect[0]]
-                         annotations[key2]['Rectangle'] = np.array(an_tags[key2]['Rectangle'])
-     if len(annotations)>0:
-         image.metadata['annotations'] = annotations
+         if isinstance(tags[key], dict):
+             if tags[key]['AnnotationType'] == 13: #type 'text'
+                 annotations[key] = {'type': 'text'}
+                 annotations[key]['label'] = tags[key].get('Label', '')
+                 rect = np.array(tags[key]['Rectangle']) * rec_scale
+                 annotations[key]['position'] = [rect[1], rect[0]]
+                 annotations[key]['text'] = tags[key].get('Text', key)
+             elif tags[key]['AnnotationType']==6:
+                 annotations[key] = {'type': 'circle'}
+                 annotations[key]['label'] = tags[key].get('Label', '')
+                 rect = np.array(tags[key]['Rectangle']) * rec_scale
+                 annotations[key]['radius'] = rect[3]-rect[1]
+                 annotations[key]['position'] = [rect[1],rect[0]]
+             elif tags[key]['AnnotationType'] == 23:
+                 annotations[key] = {'type': 'spectral_image'}
+                 annotations[key]['label'] = tags[key].get('Label', '')
+                 rect = np.array(tags[key].get('Rectangle', [0 ,0, 0, 0])) * rec_scale
+                 annotations[key]['width'] = rect[3]-rect[1]
+                 annotations[key]['height'] = rect[2]-rect[0]
+                 annotations[key]['position'] = [rect[1],rect[0]]
+                 annotations[key]['Rectangle'] = np.array(tags[key].get('Rectangle', [0 ,0, 0, 0]))
+     if annotations:
+         image.metadata['annotations'] = annotations
      return annotations
  
  
- def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False): # save_file=False,
+ def open_file(filename, write_hdf_file=False, sum_frames=False, sum_eds=True):
      """Opens a file if the extension is .emd, .mrc, .hf5, .ndata, .dm3 or .dm4
  
-     If no filename is provided the QT open_file windows opens (if QT_available==True)
      Everything will be stored in a NSID style hf5 file.
      Subroutines used:
        - NSIDReader
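
read_annotation becomes read_dm_annotation with a typed signature, and the nested membership tests are replaced by one chained .get() lookup; open_file likewise drops its filename=None Qt branch and gains a sum_eds flag. A sketch of reading annotations, assuming image_dataset came from a .dm3/.dm4 file:

    annotations = ft.read_dm_annotation(image_dataset)  # was read_annotation()
    for tag in annotations.values():
        print(tag['type'], tag.get('label', ''))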
@@ -844,7 +369,7 @@
      Parameters
      ----------
      filename: str
-         name of file to be opened, if filename is None, a QT file dialog will try to open
+         name of file to be opened
      h5_group: hd5py.Group
          not used yet #TODO: provide hook for usage of external chosen group
      write_hdf_file: bool
@@ -856,184 +381,162 @@ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=Fa
          sidpy dataset with location of hdf5 dataset as attribute
  
      """
-     if filename is None:
-         selected_file = open_file_dialog_qt()
-         filename = selected_file
-
-     else:
-         if not isinstance(filename, str):
-             raise TypeError('filename must be a non-empty string or None (to a QT open file dialog)')
-         elif filename == '':
-             raise TypeError('filename must be a non-empty string or None (to a QT open file dialog)')
+     if not isinstance(filename, str):
+         raise TypeError('filename must be a non-empty string')
+     if filename == '':
+         raise TypeError('filename must be a non-empty string')
  
-     path, file_name = os.path.split(filename)
+     _, file_name = os.path.split(filename)
      basename, extension = os.path.splitext(file_name)
-
+     provenance = ''
      if extension == '.hf5':
          reader = SciFiReaders.NSIDReader(filename)
          datasets = reader.read()
          if len(datasets) < 1:
              print('no hdf5 dataset found in file')
              return {}
+         if isinstance(datasets, dict):
+             dataset_dict = datasets
          else:
-             if isinstance(datasets, dict):
-                 dataset_dict = datasets
-             else:
-                 dataset_dict = {}
-                 for index, dataset in enumerate(datasets):
-                     title = str(dataset.title).split('/')[-1]
-                     # dataset.title = str(dataset.title).split('/')[-1]
-                     dataset_dict[title] = dataset
-                     if index == 0:
-                         file = datasets[0].h5_dataset.file
-                         master_group = datasets[0].h5_dataset.parent.parent.parent
-                 for key in master_group.keys():
-                     if key not in dataset_dict:
-                         dataset_dict[key] = h5_group_to_dict(master_group[key])
-                 if not write_hdf_file:
-                     file.close()
-                 for dset in dataset_dict.values():
-                     if isinstance(dset, sidpy.Dataset):
-                         if 'Measurement' in dset.title:
-                             dset.title = dset.title.split('/')[-1]
+             dataset_dict = {}
+             for index, dataset in enumerate(datasets):
+                 title = str(dataset.title).rsplit('/', maxsplit=1)[-1]
+                 # dataset.title = str(dataset.title).split('/')[-1]
+                 dataset_dict[title] = dataset
+                 if index == 0:
+                     file = datasets[0].h5_dataset.file
+                     master_group = datasets[0].h5_dataset.parent.parent.parent
+             for key in master_group.keys():
+                 if key not in dataset_dict:
+                     dataset_dict[key] = h5_group_to_dict(master_group[key])
+             if not write_hdf_file:
+                 file.close()
+             for dset in dataset_dict.values():
+                 if isinstance(dset, sidpy.Dataset):
+                     if 'Measurement' in dset.title:
+                         dset.title = dset.title.split('/')[-1]
          return dataset_dict
-     elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5', '.mrc']:
-         # tags = open_file(filename)
-         if extension in ['.dm3', '.dm4']:
-             reader = SciFiReaders.DMReader(filename)
-
-         elif extension in ['.emi']:
-             try:
-                 import hyperspy.api as hs
-                 s = hs.load(filename)
-                 dataset_dict = {}
-                 spectrum_number = 0
-                 if not isinstance(s, list):
-                     s = [s]
-                 for index, datum in enumerate(s):
-                     dset = SciFiReaders.convert_hyperspy(datum)
-                     if datum.data.ndim == 1:
-                         dset.title = dset.title + f'_{spectrum_number}_Spectrum'
-                         spectrum_number += 1
-                     elif datum.data.ndim == 3:
-                         dset.title = dset.title + '_SI'
-                         dset = dset.T
-                     dset.title = dset.title[11:]
-                     dset.add_provenance('pyTEMlib', 'open_file', version=pyTEMlib.__version__, linked_data='emi_converted_by_hyperspy')
-                     dataset_dict[f'Channel_{index:03d}'] = dset
-
-                 return dataset_dict
-             except ImportError:
-                 print('This file type needs hyperspy to be installed to be able to be read')
-                 return
-         elif extension == '.emd':
-             reader = SciFiReaders.EMDReader(filename, sum_frames=sum_frames)
-             provenance = 'SciFiReader.EMDReader'
-         elif 'edax' in extension.lower():
-             if 'h5' in extension:
-                 reader = SciFiReaders.EDAXReader(filename)
-                 provenance = 'SciFiReader.EDAXReader'
-
-         elif extension in ['.ndata', '.h5']:
-             reader = SciFiReaders.NionReader(filename)
-             provenance = 'SciFiReader.NionReader'
-
-         elif extension in ['.mrc']:
-             reader = SciFiReaders.MRCReader(filename)
-             provenance = 'SciFiReader.MRCReader'
  
-         else:
-             raise NotImplementedError('extension not supported')
+     if extension in ['.dm3', '.dm4']:
+         reader = SciFiReaders.DMReader(filename)
+     elif extension in ['.emi']:
+         try:
+             import hyperspy.api as hs
+             s = hs.load(filename)
+             dataset_dict = {}
+             spectrum_number = 0
+             if not isinstance(s, list):
+                 s = [s]
+             for index, datum in enumerate(s):
+                 dset = SciFiReaders.convert_hyperspy(datum)
+                 if datum.data.ndim == 1:
+                     dset.title = dset.title + f'_{spectrum_number}_Spectrum'
+                     spectrum_number += 1
+                 elif datum.data.ndim == 3:
+                     dset.title = dset.title + '_SI'
+                     dset = dset.T
+                 dset.title = dset.title[11:]
+                 dset.add_provenance('pyTEMlib', 'open_file', version=__version__,
+                                     linked_data='emi_converted_by_hyperspy')
+                 dataset_dict[f'Channel_{index:03d}'] = dset
+             return dataset_dict
+         except ImportError:
+             print('This file type needs hyperspy to be installed to be able to be read')
+             return
+     elif extension == '.emd':
+         reader = SciFiReaders.EMDReader(filename, sum_frames=sum_frames)
+         provenance = 'SciFiReader.EMDReader'
+     elif 'edax' in extension.lower():
+         if 'h5' in extension:
+             reader = SciFiReaders.EDAXReader(filename)
+             provenance = 'SciFiReader.EDAXReader'
+
+     elif extension in ['.ndata', '.h5']:
+         reader = SciFiReaders.NionReader(filename)
+         provenance = 'SciFiReader.NionReader'
+
+     elif extension in ['.rto']:
+         reader = SciFiReaders.BrukerReader(filename)
+         provenance = 'SciFiReader.BrukerReader'
+
+     elif extension in ['.mrc']:
+         reader = SciFiReaders.MRCReader(filename)
+         provenance = 'SciFiReader.MRCReader'
  
-         path, file_name = os.path.split(filename)
-         basename, _ = os.path.splitext(file_name)
-         if extension != '.emi':
-             dset = reader.read()
+     else:
+         raise NotImplementedError('extension not supported')
  
-         if extension in ['.dm3', '.dm4']:
-             title = (basename.strip().replace('-', '_')).split('/')[-1]
-             if not isinstance(dset, dict):
-                 print('Please use new SciFiReaders Package for full functionality')
-             if isinstance(dset, sidpy.Dataset):
-                 dset = {'Channel_000': dset}
-             for key in dset:
-                 read_annotation(dset[key])
-         if extension == '.emd':
-             for key1 in dset:
-                 for key in dset[key1].original_metadata:
-                     if key == 'Instrument':
-                         model = dset[key1].original_metadata[key]['InstrumentModel']
-                         id = dset[key1].original_metadata[key]['InstrumentId']
-                         dset[key1].metadata['experiment']['instrument'] = model + str(id)
-                     if key == 'Optics':
-                         if 'LastMeasuredScreenCurrent' in dset[key1].original_metadata[key]:
-                             dset[key1].metadata['experiment']['current'] = float(dset[key1].original_metadata[key]['LastMeasuredScreenCurrent'])
-                     if key == 'Scan':
-                         if 'DwellTime' in dset[key1].original_metadata[key]:
-                             dset[key1].metadata['experiment']['pixel_time'] = float(dset[key1].original_metadata[key]['DwellTime'])
-                         if 'FrameTime' in dset[key1].original_metadata[key]:
-                             dset[key1].metadata['experiment']['exposure_time'] = float(dset[key1].original_metadata[key]['FrameTime'])
-                     if key == 'Sample':
-                         if 'SampleDescription' in dset[key1].original_metadata[key]:
-                             dset[key1].metadata['experiment']['sample'] = dset[key1].original_metadata[key]['SampleDescription']
-                         if 'SampleId' in dset[key1].original_metadata[key]:
-                             dset[key1].metadata['experiment']['sample_id'] = dset[key1].original_metadata[key]['SampleId']
-                     if key == 'Detectors':
-                         if 'detector' in dset[key1].metadata['experiment']:
-                             used_detector = dset[key1].metadata['experiment']['detector']
-                             for detector in dset[key1].original_metadata[key].values():
-                                 if 'DetectorName' in detector:
-                                     if used_detector in detector['DetectorName']:
-                                         if 'CollectionAngleRange' in detector:
-                                             begin = detector['CollectionAngleRange']['begin']
-                                             end = detector['CollectionAngleRange']['end']
-                                             dset[key1].metadata['experiment']['collection_angle'] = float(begin)
-                                             dset[key1].metadata['experiment']['collection_angle_end'] = float(end)
-         if isinstance(dset, dict):
-             dataset_dict = dset
-             for dataset in dataset_dict.values():
-                 dataset.add_provenance('pyTEMlib', 'open_file', version=pyTEMlib.__version__, linked_data = 'SciFiReader')
-                 dataset.metadata['filename'] = filename
-
-         elif isinstance(dset, list):
-             if len(dset) < 1:
-                 print('no dataset found in file')
-                 return {}
-             else:
-                 if 'PageSetup' in dset[0].original_metadata:
-                     del dset[0].original_metadata['PageSetup']
-                 dset[0].original_metadata['original_title'] = title
-             dataset_dict = {}
-             for index, dataset in enumerate(dset):
-                 if extension == '.emi':
-                     if 'experiment' in dataset.metadata:
-                         if 'detector' in dataset.metadata['experiment']:
-                             dataset.title = dataset.metadata['experiment']['detector']
-                 dataset.filename = basename.strip()
-                 # read_essential_metadata(dataset)
-                 dataset.metadata['filename'] = filename
-                 dataset_dict[f'Channel_{index:03}'] = dataset
-         else:
-             dset.filename = basename.strip().replace('-', '_')
-             read_essential_metadata(dset)
-             dset.metadata['filename'] = filename
-             dataset_dict = {'Channel_000': dset}
+     _, file_name = os.path.split(filename)
+     basename, _ = os.path.splitext(file_name)
+
+     # ### Here we read the data into sidpy datasets
+     if extension != '.emi':
+         dset = reader.read()
+
+     if extension in ['.dm3', '.dm4']:
+         title = (basename.strip().replace('-', '_')).split('/')[-1]
+         if not isinstance(dset, dict):
+             print('Please use new SciFiReaders Package for full functionality')
+             if isinstance(dset, sidpy.Dataset):
+                 dset = {'Channel_000': dset}
+
+         for key in dset:
+             read_dm_annotation(dset[key])
+
+     elif extension == '.emd':
+         if not sum_eds:
+             return
+         eds_keys = []
+         for key, item in dset.items():
+             if 'SuperX' in item.title or 'UltraX' in item.title:
+                 if len(eds_keys) == 0:
+                     spectrum = item.copy()
+                 else:
+                     spectrum += item
+                 eds_keys.append(key)
+         spectrum.compute()
+
+         spectrum.data_type = dset[eds_keys[0]].data_type
+         if 'SuperX' in dset[eds_keys[0]].title:
+             spectrum.title = 'EDS_SuperX'
+         if 'UltraX' in dset[eds_keys[0]].title:
+             spectrum.title = 'EDS_UltraX'
+         spectrum.original_metadata = dset[eds_keys[0]].original_metadata.copy()
+         spectrum.metadata = dset[eds_keys[0]].metadata.copy()
+
+         for key in eds_keys:
+             del dset[key]
+         dset['SuperX'] = spectrum
+
+     if isinstance(dset, dict):
+         dataset_dict = dset
+         for dataset in dataset_dict.values():
+             dataset.add_provenance('pyTEMlib', 'open_file',
+                                    version=__version__,
+                                    linked_data=provenance)
+             dataset.metadata['filename'] = filename
+
+     elif isinstance(dset, list):
+         DeprecationWarning('Update SciFiReaders, we do not support list of datasets anymore')
+     else:
+         dset.filename = basename.strip().replace('-', '_')
+         read_essential_metadata(dset)
+         dset.metadata['filename'] = filename
+         dataset_dict = {'Channel_000': dset}
  
      # Temporary Fix for dual eels spectra in dm files
-     # Todo: Fic in ScifyReaders
-     for dset in dataset_dict.values():
+     # Todo: Fix in SciFiReaders
+     for dset in dataset_dict.values():
          if 'experiment' in dset.metadata:
-             if 'single_exposure_time' in dset.metadata['experiment']:
-                 dset.metadata['experiment']['exposure_time'] = dset.metadata['experiment']['number_of_frames'] * \
-                                                                dset.metadata['experiment']['single_exposure_time']
+             exp_meta = dset.metadata['experiment']
+             if 'single_exposure_time' in exp_meta:
+                 exp_meta['exposure_time'] = exp_meta['number_of_frames'] * \
+                                             exp_meta['single_exposure_time']
      if write_hdf_file:
-         h5_master_group = save_dataset(dataset_dict, filename=filename)
+         save_dataset(dataset_dict, filename=filename)
  
      save_path(filename)
-         return dataset_dict
-     else:
-         print('file type not handled yet.')
-         return
+     return dataset_dict
  
  
  ################################################################
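
The rewritten open_file body is the core of this release: reader dispatch moves to a flat if/elif chain, Bruker .rto files become readable, and for Velox .emd files every SuperX/UltraX detector channel is summed into a single spectrum stored under the 'SuperX' key (note that sum_eds=False currently makes the .emd branch return None rather than the unsummed channels). A sketch with a hypothetical file name:

    datasets = ft.open_file('map.emd')   # sum_eds defaults to True
    eds_spectrum = datasets['SuperX']    # summed detector channels, title 'EDS_SuperX'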
@@ -1048,96 +551,28 @@ def read_essential_metadata(dataset):
      if not isinstance(dataset, sidpy.Dataset):
          raise TypeError("we need a sidpy.Dataset")
      experiment_dictionary = {}
-     if 'metadata' in dataset.original_metadata:
-         if 'hardware_source' in dataset.original_metadata['metadata']:
-             experiment_dictionary = read_nion_image_info(dataset.original_metadata)
-     if 'DM' in dataset.original_metadata:
-         experiment_dictionary = read_dm3_info(dataset.original_metadata)
+     if dataset.original_metadata.get('metadata', {}).get('hardware_source'):
+         experiment_dictionary = read_nion_image_info(dataset.original_metadata)
      if 'experiment' not in dataset.metadata:
          dataset.metadata['experiment'] = {}
-
      dataset.metadata['experiment'].update(experiment_dictionary)
  
  
- def read_dm3_info(original_metadata):
-     """Read essential parameter from original_metadata originating from a dm3 file"""
-     if not isinstance(original_metadata, dict):
-         raise TypeError('We need a dictionary to read')
-
-     if 'DM' not in original_metadata:
-         return {}
-     if 'ImageTags' not in original_metadata:
-         return {}
-     exp_dictionary = original_metadata['ImageTags']
-     experiment = {}
-     if 'EELS' in exp_dictionary:
-         if 'Acquisition' in exp_dictionary['EELS']:
-             for key, item in exp_dictionary['EELS']['Acquisition'].items():
-                 if 'Exposure' in key:
-                     _, units = key.split('(')
-                     if units[:-1] == 's':
-                         experiment['single_exposure_time'] = item
-                 if 'Integration' in key:
-                     _, units = key.split('(')
-                     if units[:-1] == 's':
-                         experiment['exposure_time'] = item
-                 if 'frames' in key:
-                     experiment['number_of_frames'] = item
-
-         if 'Experimental Conditions' in exp_dictionary['EELS']:
-             for key, item in exp_dictionary['EELS']['Experimental Conditions'].items():
-                 if 'Convergence' in key:
-                     experiment['convergence_angle'] = item
-                 if 'Collection' in key:
-                     # print(item)
-                     # for val in item.values():
-                     experiment['collection_angle'] = item
-         if 'number_of_frames' not in experiment:
-             experiment['number_of_frames'] = 1
-         if 'exposure_time' not in experiment:
-             if 'single_exposure_time' in experiment:
-                 experiment['exposure_time'] = experiment['number_of_frames'] * experiment['single_exposure_time']
-
-     else:
-         if 'Acquisition' in exp_dictionary:
-             if 'Parameters' in exp_dictionary['Acquisition']:
-                 if 'High Level' in exp_dictionary['Acquisition']['Parameters']:
-                     if 'Exposure (s)' in exp_dictionary['Acquisition']['Parameters']['High Level']:
-                         experiment['exposure_time'] = exp_dictionary['Acquisition']['Parameters']['High Level'][
-                             'Exposure (s)']
-
-     if 'Microscope Info' in exp_dictionary:
-         if 'Microscope' in exp_dictionary['Microscope Info']:
-             experiment['microscope'] = exp_dictionary['Microscope Info']['Microscope']
-         if 'Voltage' in exp_dictionary['Microscope Info']:
-             experiment['acceleration_voltage'] = exp_dictionary['Microscope Info']['Voltage']
-
-     return experiment
-
  
  def read_nion_image_info(original_metadata):
      """Read essential parameter from original_metadata originating from a dm3 file"""
      if not isinstance(original_metadata, dict):
          raise TypeError('We need a dictionary to read')
-     if 'metadata' not in original_metadata:
-         return {}
-     if 'hardware_source' not in original_metadata['metadata']:
-         return {}
-     if 'ImageScanned' not in original_metadata['metadata']['hardware_source']:
-         return {}
+     metadata = original_metadata.get('metadata', {}).get('hardware_source', {})
  
-     exp_dictionary = original_metadata['metadata']['hardware_source']['ImageScanned']
-     experiment = exp_dictionary
-     # print(exp_dictionary)
-     if 'autostem' in exp_dictionary:
-         pass
+     return metadata.get('ImageScanned', {})
  
  
  def get_h5_filename(fname):
      """Determines file name of hdf5 file for newly converted data file"""
  
      path, filename = os.path.split(fname)
-     basename, extension = os.path.splitext(filename)
+     basename, _ = os.path.splitext(filename)
      h5_file_name_original = os.path.join(path, basename + '.hf5')
      h5_file_name = h5_file_name_original
@@ -1191,59 +626,6 @@ def h5_tree(input_object):
      sidpy.hdf_utils.print_tree(h5_file)
  
  
- def log_results(h5_group, dataset=None, attributes=None):
-     """Log Results in hdf5-file
-
-     Saves either a sidpy.Dataset or dictionary in a hdf5-file.
-     The group for the result will consist of 'Log_' and a running index.
-     That group will be placed in h5_group.
-
-     Parameters
-     ----------
-     h5_group: hd5py.Group, or sidpy.Dataset
-         groups where result group are to be stored
-     dataset: sidpy.Dataset or None
-         sidpy dataset to be stored
-     attributes: dict
-         dictionary containing results that are not based on a sidpy.Dataset
-
-     Returns
-     -------
-     log_group: hd5py.Group
-         group in hdf5 file with results.
-
-     """
-     if isinstance(h5_group, sidpy.Dataset):
-         h5_group = h5_group.h5_dataset
-         if not isinstance(h5_group, h5py.Dataset):
-             raise TypeError('Use h5_dataset of sidpy.Dataset is not a valid h5py.Dataset')
-         h5_group = h5_group.parent.parent
-
-     if not isinstance(h5_group, h5py.Group):
-         raise TypeError('Need a valid h5py.Group for logging results')
-
-     if dataset is None:
-         log_group = sidpy.hdf.prov_utils.create_indexed_group(h5_group, 'Log_')
-     else:
-         log_group = pyNSID.hdf_io.write_results(h5_group, dataset=dataset)
-         if hasattr(dataset, 'meta_data'):
-             if 'analysis' in dataset.meta_data:
-                 log_group['analysis'] = dataset.meta_data['analysis']
-         if hasattr(dataset, 'structures'):
-             for structure in dataset.structures.values():
-                 h5_add_crystal_structure(log_group, structure)
-
-         dataset.h5_dataset = log_group[dataset.title.replace('-', '_')][dataset.title.replace('-', '_')]
-     if attributes is not None:
-         for key, item in attributes.items():
-             if not isinstance(item, dict):
-                 log_group[key] = attributes[key]
-             else:
-                 log_group.create_group(key)
-                 sidpy.hdf.hdf_utils.write_simple_attrs(log_group[key], attributes[key])
-     return log_group
-
-
  def add_dataset_from_file(datasets, filename=None, key_name='Log', single_dataset=True):
      """Add dataset to datasets dictionary
  
@@ -1261,11 +643,9 @@ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_datase
      key_name: str
          actual last used name of dictionary key
      """
-
      datasets2 = open_file(filename=filename)
      first_dataset = datasets2[list(datasets2)[0]]
      if isinstance(first_dataset, sidpy.Dataset):
-
          index = 0
          for key in datasets.keys():
              if key_name in key:
@@ -1274,12 +654,17 @@ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_datase
1274
654
  if single_dataset:
1275
655
  datasets[key_name+f'_{index:03}'] = first_dataset
1276
656
  else:
1277
- for dataset in datasets2.values():
1278
- datasets[key_name+f'_{index:03}'] = dataset
1279
- index += 1
657
+ for key, dataset in datasets2.items():
658
+ print(key)
659
+ if isinstance(dataset, sidpy.Dataset):
660
+ datasets[key_name+f'_{index:03}'] = dataset
661
+ index += 1
662
+ else:
663
+ print(key)
664
+ datasets[key] = dataset
1280
665
  index -= 1
1281
666
  else:
1282
- return None
667
+ return None
1283
668
 
1284
669
  return f'{key_name}_{index:03}'
1285
670
 
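A usage sketch for add_dataset_from_file; both file names are hypothetical, and the return value is the dictionary key of the last dataset that was added:

```python
import pyTEMlib.file_tools as ft

datasets = ft.open_file('experiment.emd')      # hypothetical source file
last_key = ft.add_dataset_from_file(datasets, filename='analysis.hf5', key_name='Log')
print(last_key)                                # e.g. 'Log_000'
```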
@@ -1287,7 +672,7 @@ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_datase
 # ##
 # Crystal Structure Read and Write
 # ##
-def read_poscar(file_name=None):
+def read_poscar(file_name):
     """
     Open a POSCAR file from Vasp
     If no file name is provided an open file dialog to select a POSCAR file appears
@@ -1303,9 +688,6 @@ def read_poscar(file_name=None):
         crystal structure in ase format
     """
 
-    if file_name is None:
-        file_name = open_file_dialog_qt('POSCAR (POSCAR*.txt);;All files (*)')
-
     # use ase package to read file
     base = os.path.basename(file_name)
     base_name = os.path.splitext(base)[0]
@@ -1316,7 +698,7 @@ def read_poscar(file_name=None):
     return crystal
 
 
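With the dialog fallback gone, read_poscar needs an explicit path; the underlying call is plain ase, as in this sketch with a hypothetical file:

```python
import ase.io

# Equivalent ase call for a VASP POSCAR file.
crystal = ase.io.read('POSCAR', format='vasp', parallel=False)
print(crystal.get_chemical_formula())
```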
-def read_cif(file_name=None, verbose=False):  # open file dialog to select cif file
+def read_cif(file_name, verbose=False):  # open file dialog to select cif file
     """
     Open a cif file
     If no file name is provided an open file dialog to select a cif file appears
@@ -1333,10 +715,6 @@ def read_cif(file_name=None, verbose=False): # open file dialog to select cif f
         crystal structure in ase format
     """
 
-    if file_name is None:
-        file_name = open_file_dialog_qt('cif (*.cif);;All files (*)')
-        # use ase package to read file
-
    base = os.path.basename(file_name)
     base_name = os.path.splitext(base)[0]
     crystal = ase.io.read(file_name, format='cif', store_tags=True, parallel=False)
@@ -1351,12 +729,12 @@ def read_cif(file_name=None, verbose=False): # open file dialog to select cif f
     return crystal
 
 
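read_cif likewise now requires a path; store_tags=True makes ase keep the raw cif tags, which end up in the info dictionary. A sketch with a hypothetical file:

```python
import ase.io

crystal = ase.io.read('structure.cif', format='cif', store_tags=True, parallel=False)
# The available tag keys depend on the cif file.
print(list(crystal.info.keys()))
```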
-def h5_add_crystal_structure(h5_file, input_structure, name=None):
+def h5_add_crystal_structure(h5_file, input_structure):
     """Write crystal structure to NSID file"""
 
     if isinstance(input_structure, ase.Atoms):
 
-        crystal_tags = pyTEMlib.crystal_tools.get_dictionary(input_structure)
+        crystal_tags = crystal_tools.get_dictionary(input_structure)
         if crystal_tags['metadata'] == {}:
             crystal_tags['metadata'] = {'title': input_structure.get_chemical_formula()}
     elif isinstance(input_structure, dict):
@@ -1379,7 +757,8 @@ def h5_add_crystal_structure(h5_file, input_structure, name=None):
     # ToDo: Save all of info dictionary
     if 'metadata' in input_structure:
         structure_group.create_group('metadata')
-        sidpy.hdf.hdf_utils.write_simple_attrs(structure_group['metadata'], input_structure['metadata'])
+        sidpy.hdf.hdf_utils.write_simple_attrs(structure_group['metadata'],
+                                               input_structure['metadata'])
 
     h5_file.file.flush()
     return structure_group
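A write-side sketch for the helper above; the file name is hypothetical, and the indexed structure group is created and returned by the function:

```python
import ase.build
import h5py
import pyTEMlib.file_tools as ft

atoms = ase.build.bulk('Si', 'diamond', a=5.43)
with h5py.File('structures.hf5', 'a') as h5_file:
    structure_group = ft.h5_add_crystal_structure(h5_file, atoms)
```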
@@ -1397,7 +776,8 @@ def h5_add_to_structure(structure_group, crystal_tags):
 
 def h5_get_crystal_structure(structure_group):
     """Read crystal structure from NSID file
-    Any additional information will be read as dictionary into the info attribute of the ase.Atoms object
+    Any additional information will be read as dictionary into the
+    info attribute of the ase.Atoms object
 
     Parameters
     ----------
@@ -1421,7 +801,7 @@ def h5_get_crystal_structure(structure_group):
     for e in elements:
         crystal_tags['elements'].append(e.astype(str, copy=False))
 
-    atoms = pyTEMlib.crystal_tools.atoms_from_dictionary(crystal_tags)
+    atoms = crystal_tools.atoms_from_dictionary(crystal_tags)
     if 'metadata' in structure_group:
         atoms.info = sidpy.hdf.hdf_utils.h5_group_to_dict(structure_group)
 
@@ -1429,253 +809,3 @@ def h5_get_crystal_structure(structure_group):
         atoms.info = {'experiment': {'zone_axis': structure_group['zone_axis'][()]}}
     # ToDo: Read all of info dictionary
     return atoms
-
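And the matching read-back; both names here are assumptions ('Structure_000' assumes the indexed group name created on write):

```python
import h5py
import pyTEMlib.file_tools as ft

with h5py.File('structures.hf5', 'r') as h5_file:
    atoms = ft.h5_get_crystal_structure(h5_file['Structure_000'])
    print(atoms.get_chemical_formula())
```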
-import collections
-def etree_to_dict(element):
-    """Recursively converts an ElementTree object into a nested dictionary."""
-    d = {element.tag: {} if element.attrib else None}
-    children = list(element)
-    if children:
-        dd = collections.defaultdict(list)
-        for dc in map(etree_to_dict, children):
-            for k, v in dc.items():
-                dd[k].append(v)
-        d = {element.tag: {k: v[0] if len(v) == 1 else v for k, v in dd.items()}}
-    if element.attrib:
-        d[element.tag].update(('@' + k, v) for k, v in element.attrib.items())
-    if element.text:
-        text = element.text.strip()
-        if children or element.attrib:
-            if text:
-                d[element.tag]['#text'] = text
-        else:
-            d[element.tag] = text
-    return d
-
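The removed etree_to_dict follows the common XML-to-dict recipe: attributes are prefixed with '@', and mixed text lands under '#text'. A worked example, using the function exactly as defined above:

```python
import xml.etree.ElementTree as ET

root = ET.fromstring('<Metadata><Optics ConvergenceAngle="0.03"/></Metadata>')
print(etree_to_dict(root))
# -> {'Metadata': {'Optics': {'@ConvergenceAngle': '0.03'}}}
```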
-def read_adorned_metadata(image):
-    xml_str = image.metadata.metadata_as_xml
-    root = ET.fromstring(xml_str)
-    metadata_dict = etree_to_dict(root)
-    detector = 'detector'
-
-    if 'Detectors' in metadata_dict['Metadata']:
-        if 'ScanningDetector' in metadata_dict['Metadata']['Detectors']:
-            detector = metadata_dict['Metadata']['Detectors']['ScanningDetector']['DetectorName']
-        elif 'ImagingDetector' in metadata_dict['Metadata']['Detectors']:
-            detector = metadata_dict['Metadata']['Detectors']['ImagingDetector']['DetectorName']
-    segment = ''
-    if 'CustomPropertyGroup' in metadata_dict['Metadata']:
-        if 'CustomProperties' in metadata_dict['Metadata']['CustomPropertyGroup']:
-            for list_item in metadata_dict['Metadata']['CustomPropertyGroup']['CustomProperties']:
-
-                if isinstance(list_item, dict):
-                    for key in list_item:
-                        for item in list_item[key]:
-                            if '@name' in item:
-                                if item['@name'] == 'DetectorCommercialName':
-                                    detector = item['@value']
-                                if item['@name'] == 'StemSegment':
-                                    segment = '_' + item['@value']
-    return detector + segment, metadata_dict['Metadata']
-
-
-def get_metadata_from_adorned(ds):
-    ds.metadata['experiment'] = {}
-    if 'Optics' in ds.original_metadata:
-        if 'LastMeasuredScreenCurrent' in ds.original_metadata['Optics']:
-            ds.metadata['experiment']['current'] = float(ds.original_metadata['Optics']['LastMeasuredScreenCurrent'])
-        if 'ConvergenceAngle' in ds.original_metadata['Optics']:
-            ds.metadata['experiment']['convergence_angle'] = float(ds.original_metadata['Optics']['ConvergenceAngle'])
-        if 'AccelerationVoltage' in ds.original_metadata['Optics']:
-            ds.metadata['experiment']['acceleration_voltage'] = float(ds.original_metadata['Optics']['AccelerationVoltage'])
-        if 'SpotIndex' in ds.original_metadata['Optics']:
-            ds.metadata['experiment']['spot_size'] = ds.original_metadata['Optics']['SpotIndex']
-    if 'StagesSettings' in ds.original_metadata:
-        if 'StagePosition' in ds.original_metadata['StagesSettings']:
-            ds.metadata['experiment']['stage_position'] = ds.original_metadata['StagesSettings']['StagePosition']
-    if 'Detectors' in ds.original_metadata:
-        if 'ScanningDetector' in ds.original_metadata['Detectors']:
-            ds.metadata['experiment']['detector'] = ds.original_metadata['Detectors']['ScanningDetector']['DetectorName']
-        elif 'ImagingDetector' in ds.original_metadata['Detectors']:
-            ds.metadata['experiment']['detector'] = ds.original_metadata['Detectors']['ImagingDetector']['DetectorName']
-            ds.metadata['experiment']['exposure_time'] = ds.original_metadata['Detectors']['ImagingDetector']['ExposureTime']
-
-
1505
- """
1506
- Convert a list of adorned images to a dictionary of Sidpy datasets.
1507
- Each dataset is created from the image data and adorned metadata.
1508
- The datasets are stored in a dictionary with keys 'Channel_000', 'Channel_001', etc.
1509
- The dimensions of the datasets are set based on the image data shape and pixel sizes.
1510
- The original metadata is also stored in the dataset.
1511
- Args:
1512
- images (list or object): A list of adorned images or a single adorned image.
1513
- Returns:
1514
- dict: A dictionary of Sidpy datasets, where each dataset corresponds to an image.
1515
- """
1516
-
1517
- data_sets = {}
1518
- if not isinstance(images, list):
1519
- images = [images]
1520
- for index, image in enumerate(images):
1521
- name, original_metadata = read_adorned_metadata(image)
1522
- data_sets[f'Channel_{index:03}'] = sidpy.Dataset.from_array(image.data.T, title=name)
1523
- ds = data_sets[f'Channel_{index:03}']
1524
-
1525
-
1526
- ds.original_metadata = original_metadata
1527
-
1528
- pixel_size_x_m = float(ds.original_metadata['BinaryResult']['PixelSize']['X']['#text'])
1529
- pixel_size_y_m = float(ds.original_metadata['BinaryResult']['PixelSize']['Y']['#text'])
1530
- pixel_size_x_nm = pixel_size_x_m * 1e9
1531
- pixel_size_y_nm = pixel_size_y_m * 1e9
1532
- if image.data.ndim == 3:
1533
- ds.data_type = 'image_stack'
1534
- ds.set_dimension(0, sidpy.Dimension(np.arange(image.data.shape[0]),
1535
- name='frame', units='frame', quantity='Length', dimension_type='temporal'))
1536
- ds.set_dimension(1, sidpy.Dimension(np.arange(image.data.shape[1]) * pixel_size_y_nm,
1537
- name='y', units='nm', quantity='Length', dimension_type='spatial'))
1538
- ds.set_dimension(2, sidpy.Dimension(np.arange(image.data.shape[2]) * pixel_size_x_nm,
1539
- name='x', units='nm', quantity='Length', dimension_type='spatial'))
1540
- else:
1541
- ds.data_type = 'image'
1542
- ds.set_dimension(0, sidpy.Dimension(np.arange(image.data.shape[0]) * pixel_size_y_nm,
1543
- name='y', units='nm', quantity='Length', dimension_type='spatial'))
1544
- ds.set_dimension(1, sidpy.Dimension(np.arange(image.data.shape[1]) * pixel_size_x_nm,
1545
- name='x', units='nm', quantity='Length', dimension_type='spatial'))
1546
-
1547
- get_metadata_from_adorned(ds)
1548
- return data_sets
1549
-
1550
-
1551
- ###############################################
1552
- # Support old pyTEM file format
1553
- ###############################################
1554
-
1555
- def read_old_h5group(current_channel):
1556
- """Make a sidpy.Dataset from pyUSID style hdf5 group
1557
-
1558
- Parameters
1559
- ----------
1560
- current_channel: h5_group
1561
-
1562
- Returns
1563
- -------
1564
- sidpy.Dataset
1565
- """
1566
-
1567
- dim_dir = []
1568
- if 'nDim_Data' in current_channel:
1569
- h5_dataset = current_channel['nDim_Data']
1570
- reader = pyNSID.NSIDReader(h5_dataset.file.filename)
1571
- dataset = reader.read(h5_dataset)
1572
- dataset.h5_file = current_channel.file
1573
- return dataset
1574
- elif 'Raw_Data' in current_channel:
1575
- if 'image_stack' in current_channel:
1576
- sid_dataset = sidpy.Dataset.from_array(np.swapaxes(current_channel['image_stack'][()], 2, 0))
1577
- dim_dir = ['SPATIAL', 'SPATIAL', 'TEMPORAL']
1578
- elif 'data' in current_channel:
1579
- sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
1580
- dim_dir = ['SPATIAL', 'SPATIAL']
1581
- else:
1582
- size_x = int(current_channel['spatial_size_x'][()])
1583
- size_y = int(current_channel['spatial_size_y'][()])
1584
- if 'spectral_size_x' in current_channel:
1585
- size_s = int(current_channel['spectral_size_x'][()])
1586
- else:
1587
- size_s = 0
1588
- data = np.reshape(current_channel['Raw_Data'][()], (size_x, size_y, size_s))
1589
- sid_dataset = sidpy.Dataset.from_array(data)
1590
- if size_x > 1:
1591
- dim_dir.append('SPATIAL')
1592
- if size_y > 1:
1593
- dim_dir.append('SPATIAL')
1594
- if size_s > 1:
1595
- dim_dir.append('SPECTRAL')
1596
- sid_dataset.h5_dataset = current_channel['Raw_Data']
1597
-
1598
- elif 'data' in current_channel:
1599
- sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
1600
- dim_dir = ['SPATIAL', 'SPATIAL']
1601
- sid_dataset.h5_dataset = current_channel['data']
1602
- else:
1603
- return
1604
-
1605
- if 'SPATIAL' in dim_dir:
1606
- if 'SPECTRAL' in dim_dir:
1607
- sid_dataset.data_type = sidpy.DataType.SPECTRAL_IMAGE
1608
- elif 'TEMPORAL' in dim_dir:
1609
- sid_dataset.data_type = sidpy.DataType.IMAGE_STACK
1610
- else:
1611
- sid_dataset.data_type = sidpy.DataType.IMAGE
1612
- else:
1613
- sid_dataset.data_type = sidpy.DataType.SPECTRUM
1614
-
1615
- sid_dataset.quantity = 'intensity'
1616
- sid_dataset.units = 'counts'
1617
- if 'analysis' in current_channel:
1618
- sid_dataset.source = current_channel['analysis'][()]
1619
-
1620
- set_dimensions(sid_dataset, current_channel)
1621
-
1622
- return sid_dataset
1623
-
1624
-
-def set_dimensions(dset, current_channel):
-    """Attaches correct dimension from old pyTEMlib style.
-
-    Parameters
-    ----------
-    dset: sidpy.Dataset
-    current_channel: h5py.Group
-    """
-    dim = 0
-    if dset.data_type == sidpy.DataType.IMAGE_STACK:
-        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]), name='frame',
-                                                units='frame', quantity='stack',
-                                                dimension_type='TEMPORAL'))
-        dim += 1
-    if 'IMAGE' in dset.data_type:
-
-        if 'spatial_scale_x' in current_channel:
-            scale_x = current_channel['spatial_scale_x'][()]
-        else:
-            scale_x = 1
-        if 'spatial_units' in current_channel:
-            units_x = current_channel['spatial_units'][()]
-            if len(units_x) < 2:
-                units_x = 'pixel'
-        else:
-            units_x = 'generic'
-        if 'spatial_scale_y' in current_channel:
-            scale_y = current_channel['spatial_scale_y'][()]
-        else:
-            scale_y = 0
-        dset.set_dimension(dim, sidpy.Dimension('x', np.arange(dset.shape[dim])*scale_x,
-                                                units=units_x, quantity='Length',
-                                                dimension_type='SPATIAL'))
-        dim += 1
-        dset.set_dimension(dim, sidpy.Dimension('y', np.arange(dset.shape[dim])*scale_y,
-                                                units=units_x, quantity='Length',
-                                                dimension_type='SPATIAL'))
-        dim += 1
-    if dset.data_type in [sidpy.DataType.SPECTRUM, sidpy.DataType.SPECTRAL_IMAGE]:
-        if 'spectral_scale_x' in current_channel:
-            scale_s = current_channel['spectral_scale_x'][()]
-        else:
-            scale_s = 1.0
-        if 'spectral_units_x' in current_channel:
-            units_s = current_channel['spectral_units_x']
-        else:
-            units_s = 'eV'
-
-        if 'spectral_offset_x' in current_channel:
-            offset = current_channel['spectral_offset_x']
-        else:
-            offset = 0.0
-        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_s + offset,
-                                                name='energy',
-                                                units=units_s,
-                                                quantity='energy_loss',
-                                                dimension_type='SPECTRAL'))
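For completeness, how the removed legacy path would have been driven in the old release; the file and group paths are hypothetical:

```python
import h5py

# Legacy pyUSID-style layout; read_old_h5group only exists in the old version.
with h5py.File('old_measurement.hf5', 'r') as h5_file:
    dataset = read_old_h5group(h5_file['Measurement_000/Channel_000'])
    print(dataset.data_type)
```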