pyTEMlib 0.2020.11.1__py3-none-any.whl → 0.2024.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of pyTEMlib might be problematic.

Files changed (60)
  1. pyTEMlib/__init__.py +11 -11
  2. pyTEMlib/animation.py +631 -0
  3. pyTEMlib/atom_tools.py +240 -245
  4. pyTEMlib/config_dir.py +57 -33
  5. pyTEMlib/core_loss_widget.py +658 -0
  6. pyTEMlib/crystal_tools.py +1255 -0
  7. pyTEMlib/diffraction_plot.py +756 -0
  8. pyTEMlib/dynamic_scattering.py +293 -0
  9. pyTEMlib/eds_tools.py +609 -0
  10. pyTEMlib/eels_dialog.py +749 -491
  11. pyTEMlib/{interactive_eels.py → eels_dialog_utilities.py} +1199 -1177
  12. pyTEMlib/eels_tools.py +2031 -1698
  13. pyTEMlib/file_tools.py +1276 -560
  14. pyTEMlib/file_tools_qt.py +193 -0
  15. pyTEMlib/graph_tools.py +1166 -450
  16. pyTEMlib/graph_viz.py +449 -0
  17. pyTEMlib/image_dialog.py +158 -0
  18. pyTEMlib/image_dlg.py +146 -232
  19. pyTEMlib/image_tools.py +1399 -1028
  20. pyTEMlib/info_widget.py +933 -0
  21. pyTEMlib/interactive_image.py +1 -226
  22. pyTEMlib/kinematic_scattering.py +1196 -0
  23. pyTEMlib/low_loss_widget.py +176 -0
  24. pyTEMlib/microscope.py +61 -81
  25. pyTEMlib/peak_dialog.py +1047 -410
  26. pyTEMlib/peak_dlg.py +286 -242
  27. pyTEMlib/probe_tools.py +653 -207
  28. pyTEMlib/sidpy_tools.py +153 -136
  29. pyTEMlib/simulation_tools.py +104 -87
  30. pyTEMlib/version.py +6 -3
  31. pyTEMlib/xrpa_x_sections.py +20972 -0
  32. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/LICENSE +21 -21
  33. pyTEMlib-0.2024.9.0.dist-info/METADATA +92 -0
  34. pyTEMlib-0.2024.9.0.dist-info/RECORD +37 -0
  35. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/WHEEL +5 -5
  36. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/entry_points.txt +0 -1
  37. pyTEMlib/KinsCat.py +0 -2758
  38. pyTEMlib/__version__.py +0 -2
  39. pyTEMlib/data/TEMlibrc +0 -68
  40. pyTEMlib/data/edges_db.csv +0 -189
  41. pyTEMlib/data/edges_db.pkl +0 -0
  42. pyTEMlib/data/fparam.txt +0 -103
  43. pyTEMlib/data/microscopes.csv +0 -7
  44. pyTEMlib/data/microscopes.xml +0 -167
  45. pyTEMlib/data/path.txt +0 -1
  46. pyTEMlib/defaults_parser.py +0 -90
  47. pyTEMlib/dm3_reader.py +0 -613
  48. pyTEMlib/edges_db.py +0 -76
  49. pyTEMlib/eels_dlg.py +0 -224
  50. pyTEMlib/hdf_utils.py +0 -483
  51. pyTEMlib/image_tools1.py +0 -2194
  52. pyTEMlib/info_dialog.py +0 -237
  53. pyTEMlib/info_dlg.py +0 -202
  54. pyTEMlib/nion_reader.py +0 -297
  55. pyTEMlib/nsi_reader.py +0 -170
  56. pyTEMlib/structure_tools.py +0 -316
  57. pyTEMlib/test.py +0 -2072
  58. pyTEMlib-0.2020.11.1.dist-info/METADATA +0 -20
  59. pyTEMlib-0.2020.11.1.dist-info/RECORD +0 -45
  60. {pyTEMlib-0.2020.11.1.dist-info → pyTEMlib-0.2024.9.0.dist-info}/top_level.txt +0 -0
pyTEMlib/file_tools.py CHANGED
@@ -1,560 +1,1276 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- File Tools for pyTEMlib
4
- """
5
- ##################################
6
- #
7
- # 2018 01 31 Included Nion Swift files to be opened
8
- # major revision 2020 09 to include sidpy and pyNSID data formats
9
- #
10
- ##################################
11
-
12
- import numpy as np
13
- import h5py
14
- import os
15
-
16
- # Open/Save File dialog
17
- try:
18
- from PyQt5 import QtGui, QtWidgets, QtCore
19
- QT_available = True
20
- except ImportError:
21
- QT_available = False
22
-
23
- # =============================================================
24
- # Include sidpy and other pyTEMlib Libraries #
25
- # =============================================================
26
- from .config_dir import config_path
27
-
28
- from .nsi_reader import NSIDReader
29
- from .dm3_reader import DM3Reader
30
- from .nion_reader import NionReader
31
- import pyNSID
32
-
33
- import ipywidgets as widgets
34
- from IPython.display import display
35
-
36
- import pyTEMlib.sidpy_tools
37
- import sys
38
- sys.path.insert(0, "../../sidpy/")
39
- import sidpy
40
-
41
- import sidpy.hdf.prov_utils as hdf_utils
42
-
43
- ChooseDataset = pyTEMlib.sidpy_tools.ChooseDataset
44
- Dimension = sidpy.Dimension
45
- nest_dict = sidpy.base.dict_utils.nest_dict
46
-
47
- get_slope = sidpy.base.num_utils.get_slope
48
- __version__ = '10.30.2020'
49
-
50
- # TODO: new sidpy-version, uncomment and delete function below.
51
- # flatten_dict = sidpy.dict_utils.flatten_dict
52
-
53
-
54
- def flatten_dict(d, parent_key='', sep='-'):
55
- """
56
- should be in sidpy
57
- """
58
- items = []
59
- for k, v in d.items():
60
- if sep in k:
61
- k = k.replace(sep, '_')
62
- new_key = parent_key + sep + k if parent_key else k
63
- if isinstance(v, dict):
64
- items.extend(flatten_dict(v, new_key, sep=sep).items())
65
- elif isinstance(v, list):
66
- for i in range(len(v)):
67
- if isinstance(v[i], dict):
68
- for kk in v[i]:
69
- items.append(('dim-'+kk+'-'+str(i), v[i][kk]))
70
- else:
71
- if type(v) != bytes:
72
- items.append((new_key, v))
73
- else:
74
- if type(v) != bytes:
75
- items.append((new_key, v))
76
- return dict(items)
77
-
78
-
79
- #
80
- # General Open and Save Methods
81
- #
82
-
83
-
84
- def get_qt_app():
85
- """
86
- will start QT Application if not running yet
87
-
88
- :returns: QApplication
89
-
90
- """
91
-
92
- # start qt event loop
93
- _instance = QtWidgets.QApplication.instance()
94
- if not _instance:
95
- # print('not_instance')
96
- _instance = QtWidgets.QApplication([])
97
-
98
- return _instance
99
-
100
-
101
- def get_last_path():
102
- """
103
- Get last path used in pyTEMlib
104
- """
105
- try:
106
- fp = open(config_path + '\\path.txt', 'r')
107
- path = fp.read()
108
- fp.close()
109
- except IOError:
110
- path = ''
111
-
112
- if len(path) < 2:
113
- path = '.'
114
- return path
115
-
116
-
117
- def save_path(filename):
118
- """
119
- save last path used in pyTEMlib
120
- """
121
- if len(filename) > 1:
122
- fp = open(config_path + '\\path.txt', 'w')
123
- path, fname = os.path.split(filename)
124
- fp.write(path)
125
- fp.close()
126
- else:
127
- path = '.'
128
- return path
129
-
130
-
131
- def set_directory():
132
- """
133
- set directory to last used path in pyTEMlib
134
- """
135
- path = get_last_path()
136
-
137
- try:
138
- get_qt_app()
139
- except BaseException:
140
- pass
141
-
142
- options = QtWidgets.QFileDialog.Options()
143
- options |= QtWidgets.QFileDialog.ShowDirsOnly
144
-
145
- fname = str(QtWidgets.QFileDialog.getExistingDirectory(None, "Select Directory", path, options=options))
146
-
147
- path = save_path(fname)
148
-
149
- return path
150
-
151
-
152
- def savefile_dialog(initial_file='*.hf5', file_types=None):
153
- """
154
- Opens a save dialog in QT and returns an "*.hf5" file.
155
- New now with initial file
156
- """
157
- # Check whether QT is available
158
- if not QT_available:
159
- print('No QT dialog')
160
- return None
161
- else:
162
- if file_types is None:
163
- file_types = "All files (*)"
164
- try:
165
- get_qt_app()
166
- except BaseException:
167
- pass
168
-
169
- # Determine last path used
170
- path = get_last_path()
171
-
172
- filename = sidpy.io.interface_utils.savefile_dialog(initial_file, file_types=file_types, file_path=path)
173
- save_path(filename)
174
-
175
- if len(filename) > 3:
176
- h5_file_name = get_h5_filename(filename)
177
- return h5_file_name
178
- else:
179
- return ''
180
-
181
-
182
- def openfile_dialog(file_types=None): # , multiple_files=False):
183
- """
184
- Opens a File dialog which is used in open_file() function
185
-
186
- This function uses tkinter or pyQt5.
187
- The app of the Gui has to be running for QT so Tkinter is a safer bet.
188
- In jupyter notebooks use %gui Qt early in the notebook.
189
-
190
-
191
- The file looks first for a path.txt file for the last directory you used.
192
-
193
- Parameters
194
- file_types : string of the file type filter
195
- Returns
196
- filename : full filename with absolute path and extension as a string
197
- Examples
198
- >> import file_tools as ft
199
- >>
200
- >> filename = ft.openfile_dialog()
201
- >>
202
- >> print(filename)
203
- """
204
- # determine file types by extension
205
- if file_types is None:
206
- file_types = 'TEM files (*.dm3 *.qf3 *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
207
- 'DM files (*.dm3);;Nion files (*.ndata *.h5);;All files (*)'
208
- elif file_types == 'pyNSID':
209
- file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \
210
- 'DM files (*.dm3);;Nion files (*.ndata *.h5);;All files (*)'
211
-
212
- # file_types = [("TEM files",["*.dm*","*.hf*","*.ndata" ]),("pyUSID files","*.hf5"),("DM files","*.dm*"),
213
- # ("Nion files",["*.h5","*.ndata"]),("all files","*.*")]
214
- # Determine last path used
215
-
216
- path = get_last_path()
217
- _ = get_qt_app()
218
-
219
- filename = sidpy.io.interface_utils.openfile_dialog(file_types=file_types, file_path=path)
220
- #
221
- save_path(filename)
222
-
223
- return filename
224
-
225
-
226
- def read_h5_dataset(h5_group):
227
- """
228
- Reads a h5_dataset
229
- """
230
- if isinstance(h5_group, h5py.Group):
231
- reader = NSIDReader(h5_group)
232
- dataset = reader.read()[-1]
233
- elif isinstance(h5_group, h5py.Dataset):
234
- reader = NSIDReader(h5_group)
235
- dataset = reader.read_h5py_dataset(h5_group)
236
- else:
237
- raise ValueError('Need an h5py Group to read a dataset')
238
-
239
- return dataset
240
-
241
- def open_file(filename=None, save_file=False, h5_group=None):
242
- """
243
- Opens a file if the extension is .hf5, .dm3 or .dm4
244
-
245
- If no filename is provided the qt open_file windows opens
246
-
247
- Everything will be stored in a NSID style hf5 file.
248
-
249
- Subroutines used:
250
- - NSIDReader
251
- - nsid.write_
252
- - get_main_tags
253
- - get_additional tags
254
-
255
- """
256
- get_qt_app()
257
- if filename is None:
258
- filename = openfile_dialog()
259
- if filename == '':
260
- return
261
- path, file_name = os.path.split(filename)
262
- basename, extension = os.path.splitext(file_name)
263
-
264
- if extension == '.hf5':
265
- h5_file = h5py.File(filename, mode='a')
266
-
267
- h5_group = get_start_channel(h5_file)
268
- print()
269
- if 'nDim_Data' in h5_group:
270
- h5_dataset = h5_group['nDim_Data']
271
-
272
- h5_dataset.attrs['title'] = basename
273
- reader = NSIDReader(h5_dataset)
274
- dataset = reader.read_h5py_dataset(h5_dataset)
275
- dataset.h5_file = h5_file
276
- elif 'Raw_Data' in h5_group:
277
- dataset = read_old_h5group(h5_group)
278
- dataset.h5_dataset = h5_group['Raw_Data']
279
- else:
280
- reader = NSIDReader(h5_file['Measurement_000/Channel_000'])
281
- dataset = reader.read()[-1]
282
- dataset.h5_file = h5_file
283
- return dataset
284
-
285
- elif extension in ['.dm3', '.dm4', '.ndata', '.h5']:
286
-
287
- # tags = open_file(filename)
288
- if extension in ['.dm3', '.dm4']:
289
- reader = DM3Reader(filename)
290
- elif extension in ['.ndata', '.h5']:
291
- reader = NionReader(filename)
292
- else:
293
- IOError('problem')
294
- path, file_name = os.path.split(filename)
295
- basename, _ = os.path.splitext(file_name)
296
- dset = reader.read()
297
- dset.title = basename.strip().replace('-', '_')
298
- dset.filename = basename.strip().replace('-', '_')
299
- dset.original_metadata = flatten_dict(dset.original_metadata)
300
-
301
- h5_filename = get_h5_filename(filename)
302
- h5_file = h5py.File(h5_filename, mode='a')
303
-
304
- if 'Measurement_000' in h5_file:
305
- print('could not write dataset to file, try saving it with ft.save()')
306
- else:
307
- if not isinstance(h5_group, h5py.Group):
308
- h5_group = h5_file.create_group('Measurement_000/Channel_000')
309
- dset.axes = dset._axes
310
- dset.attrs = {}
311
- h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dset, h5_group)
312
- dset.original_metadata = nest_dict(dset.original_metadata)
313
-
314
- dset.h5_dataset = h5_dataset
315
- return dset
316
- else:
317
- print('file type not handled yet.')
318
- return
319
-
320
-
321
- def get_h5_filename(fname):
322
- """
323
- Determine file name for nsid file
324
- """
325
- path, filename = os.path.split(fname)
326
- basename, extension = os.path.splitext(filename)
327
- h5_file_name_original = os.path.join(path, basename + '.hf5')
328
- h5_file_name = h5_file_name_original
329
-
330
- if os.path.exists(os.path.abspath(h5_file_name_original)):
331
- count = 1
332
- h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'
333
- while os.path.exists(os.path.abspath(h5_file_name)):
334
- count += 1
335
- h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'
336
-
337
- if h5_file_name != h5_file_name_original:
338
- path, filename = os.path.split(h5_file_name)
339
- print('Cannot overwrite file. Using: ', filename)
340
- return str(h5_file_name)
341
-
342
-
343
- def get_start_channel(h5_file):
344
- return get_main_channel(h5_file)
345
-
346
-
347
- def get_main_channel(h5_file):
348
- """
349
- get name of directory in hdf5 file system
350
- """
351
- current_channel = None
352
- if 'Measurement_000' in h5_file:
353
- if 'Measurement_000/Channel_000' in h5_file:
354
- current_channel = h5_file['Measurement_000/Channel_000']
355
- return current_channel
356
-
357
-
358
- def h5_tree(input):
359
- """
360
- Just a wrapper for the sidpy function print_tree,
361
-
362
- so that sidpy does not have to be loaded in notebook
363
- """
364
- if isinstance(input, sidpy.Dataset):
365
- if not isinstance(input.h5_dataset, h5py.Dataset):
366
- raise ValueError('sidpy dataset does not have an associated h5py dataset')
367
- h5_file = input.h5_dataset.file
368
- elif isinstance(input, h5py.Dataset):
369
- h5_file = input.file
370
- elif isinstance(input, (h5py.Group, h5py.File)):
371
- h5_file = input
372
- else:
373
- raise TypeError('should be a h5py.object or sidpy Dataset')
374
- sidpy.hdf_utils.print_tree(h5_file)
375
-
376
-
377
- def log_results(h5_group, dataset=None, attributes=None):
378
- """
379
- Log results in nsid style file
380
- """
381
- if dataset is None:
382
- log_group = hdf_utils.create_indexed_group(h5_group, 'Log_')
383
- else:
384
- log_group = pyNSID.hdf_io.write_results(h5_group, dataset=dataset)
385
- if 'analysis' in dataset.metadata:
386
- if 'analysis' not in log_group:
387
- log_group['analysis'] = dataset.metadata['analysis']
388
- else:
389
- log_group['analysis'][()] = dataset.metadata['analysis']
390
-
391
- dataset.h5_dataset = log_group[dataset.title.replace('-', '_')]
392
- if attributes is not None:
393
- for key, item in flatten_dict(attributes).items():
394
- if key not in log_group:
395
- log_group[key] = item
396
-
397
- return log_group
398
-
399
-
400
- ###
401
- # Crystal Structure Read and Write
402
- ###
403
- def h5_add_crystal_structure(h5_file, crystal_tags):
404
- structure_group = hdf_utils.create_indexed_group(h5_file, 'Structure')
405
-
406
- structure_group['unit_cell'] = np.squeeze(crystal_tags['unit_cell'])
407
- structure_group['relative_positions'] = np.squeeze(crystal_tags['base'])
408
- structure_group['title'] = str(crystal_tags['crystal_name'])
409
- structure_group['_' + crystal_tags['crystal_name']] = str(crystal_tags['crystal_name'])
410
- structure_group['elements'] = np.array(crystal_tags['elements'], dtype='S')
411
- if 'zone_axis' in structure_group:
412
- structure_group['zone_axis'] = np.array(crystal_tags['zone_axis'], dtype=float)
413
- else:
414
- structure_group['zone_axis'] = np.array([1., 0., 0.], dtype=float)
415
- h5_file.flush()
416
- return structure_group
417
-
418
-
419
- def h5_get_crystal_structure(structure_group):
420
- crystal_tags = {'unit_cell': structure_group['unit_cell'][()], 'base': structure_group['relative_positions'][()],
421
- 'crystal_name': structure_group['title'][()]}
422
- if '2D' in structure_group:
423
- crystal_tags['2D'] = structure_group['2D'][()]
424
- elements = structure_group['elements'][()]
425
- crystal_tags['elements'] = []
426
- for e in elements:
427
- crystal_tags['elements'].append(e.astype(str, copy=False))
428
-
429
- if 'zone_axis' in structure_group:
430
- crystal_tags['zone_axis'] = structure_group['zone_axis'][()]
431
- return crystal_tags
432
-
433
-
434
- # ##############################################
435
- # Support old pyTEM file format
436
- # ##############################################
437
-
438
- def read_old_h5group(current_channel):
439
- """
440
- make a sidpy dataset from pyUSID style hdf5 group
441
- input
442
- current_channel: h5_group
443
- return
444
- sidpy Dataset
445
- """
446
- dim_dir = []
447
- if 'nDim_Data' in current_channel:
448
- h5_dataset = current_channel['nDim_Data']
449
- reader = NSIDReader(h5_dataset)
450
- dataset = reader.read_h5py_dataset(h5_dataset)
451
- dataset.h5_file = current_channel.file
452
- return dataset
453
- elif 'Raw_Data' in current_channel:
454
- if 'image_stack' in current_channel:
455
- sid_dataset = sidpy.Dataset.from_array(np.swapaxes(current_channel['image_stack'][()], 2, 0))
456
- dim_dir = ['SPATIAL', 'SPATIAL', 'TEMPORAL']
457
- elif 'data' in current_channel:
458
- sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
459
- dim_dir = ['SPATIAL', 'SPATIAL']
460
- else:
461
- size_x = int(current_channel['spatial_size_x'][()])
462
- size_y = int(current_channel['spatial_size_y'][()])
463
- if 'spectral_size_x' in current_channel:
464
- size_s = int(current_channel['spectral_size_x'][()])
465
- else:
466
- size_s = 0
467
- data = np.reshape(current_channel['Raw_Data'][()], (size_x, size_y, size_s))
468
- sid_dataset = sidpy.Dataset.from_array(data)
469
- if size_x > 1:
470
- dim_dir.append('SPATIAL')
471
- if size_y > 1:
472
- dim_dir.append('SPATIAL')
473
- if size_s > 1:
474
- dim_dir.append('SPECTRAL')
475
- sid_dataset.h5_dataset = current_channel['Raw_Data']
476
-
477
- elif 'data' in current_channel:
478
- sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
479
- dim_dir = ['SPATIAL', 'SPATIAL']
480
- sid_dataset.h5_dataset = current_channel['data']
481
- else:
482
- return
483
-
484
- if 'SPATIAL' in dim_dir:
485
- if 'SPECTRAL' in dim_dir:
486
- sid_dataset.data_type = sidpy.DataTypes.SPECTRAL_IMAGE
487
- elif 'TEMPORAL' in dim_dir:
488
- sid_dataset.data_type = sidpy.DataTypes.IMAGE_STACK
489
- else:
490
- sid_dataset.data_type = sidpy.DataTypes.IMAGE
491
- else:
492
- sid_dataset.data_type = sidpy.DataTypes.SPECTRUM
493
-
494
- sid_dataset.quantity = 'intensity'
495
- sid_dataset.units = 'counts'
496
- if 'analysis' in current_channel:
497
- sid_dataset.source = current_channel['analysis'][()]
498
-
499
- set_dimensions(sid_dataset, current_channel)
500
-
501
- return sid_dataset
502
-
503
-
504
- def set_dimensions(dset, current_channel):
505
- """
506
- Attaches correct dimension from old pyTEMlib style.
507
-
508
- Input:
509
- dset: sidpy Dataset
510
- current_channel: hdf5 group
511
- """
512
- dim = 0
513
- if dset.data_type == sidpy.DataTypes.IMAGE_STACK:
514
- dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]), name='frame',
515
- units='frame', quantity='stack',
516
- dimension_type='TEMPORAL'))
517
- dim += 1
518
- if 'IMAGE' in dset.data_type:
519
-
520
- if 'spatial_scale_x' in current_channel:
521
- scale_x = current_channel['spatial_scale_x'][()]
522
- else:
523
- scale_x = 1
524
- if 'spatial_units' in current_channel:
525
- units_x = current_channel['spatial_units'][()]
526
- if len(units_x) < 2:
527
- units_x = 'pixel'
528
- else:
529
- units_x = 'generic'
530
- if 'spatial_scale_y' in current_channel:
531
- scale_y = current_channel['spatial_scale_y'][()]
532
- else:
533
- scale_y = 0
534
- dset.set_dimension(dim, sidpy.Dimension('x', np.arange(dset.shape[dim])*scale_x,
535
- units=units_x, quantity='Length',
536
- dimension_type='SPATIAL'))
537
- dim += 1
538
- dset.set_dimension(dim, sidpy.Dimension('y', np.arange(dset.shape[dim])*scale_y,
539
- units=units_x, quantity='Length',
540
- dimension_type='SPATIAL'))
541
- dim += 1
542
- if dset.data_type in [sidpy.DataTypes.SPECTRUM, sidpy.DataTypes.SPECTRAL_IMAGE]:
543
- if 'spectral_scale_x' in current_channel:
544
- scale_s = current_channel['spectral_scale_x'][()]
545
- else:
546
- scale_s = 1.0
547
- if 'spectral_units_x' in current_channel:
548
- units_s = current_channel['spectral_units_x']
549
- else:
550
- units_s = 'eV'
551
-
552
- if 'spectral_offset_x' in current_channel:
553
- offset = current_channel['spectral_offset_x']
554
- else:
555
- offset = 0.0
556
- dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_s + offset,
557
- name='energy',
558
- units=units_s,
559
- quantity='energy_loss',
560
- dimension_type='SPECTRAL'))
1
+ """file_tools: All tools to load and save data
2
+
3
+ ##################################
4
+
5
+ 2018 01 31 Included Nion Swift files to be opened
6
+ major revision 2020 09 to include sidpy and pyNSID data formats
7
+ 2022 change to ase format for structures: this changed the default unit of length to Angstrom!!!
8
+
9
+ ##################################
10
+ """
11
+
12
+ import numpy as np
13
+ import h5py
14
+ import os
15
+ import pickle
16
+
17
+ # For structure files of various flavor for instance POSCAR and other theory packages
18
+ import ase.io
19
+
20
+ # =============================================
21
+ # Include pycroscopy libraries #
22
+ # =============================================
23
+ import SciFiReaders
24
+ import pyNSID
25
+ import sidpy
26
+ import ipywidgets as widgets
27
+ from IPython.display import display
28
+
29
+ # =============================================
30
+ # Include pyTEMlib libraries #
31
+ # =============================================
32
+ import pyTEMlib.crystal_tools
33
+ from pyTEMlib.config_dir import config_path
34
+ from pyTEMlib.sidpy_tools import *
35
+
37
+
38
+ Qt_available = True
39
+ try:
40
+ from PyQt5 import QtCore, QtWidgets, QtGui
41
+ except ModuleNotFoundError:
42
+ print('Qt dialogs are not available')
43
+ Qt_available = False
44
+
45
+ Dimension = sidpy.Dimension
46
+
47
+ get_slope = sidpy.base.num_utils.get_slope
48
+ __version__ = '2022.3.3'
49
+
50
+ from traitlets import Unicode, Bool, validate, TraitError
51
+ import ipywidgets
52
+
53
+
54
+ @ipywidgets.register
55
+ class FileWidget(ipywidgets.DOMWidget):
56
+ """Widget to select directories or widgets from a list
57
+
58
+ Works in Google Colab.
59
+ The widget displays a Nion file under the title it carries in Nion Swift,
60
+ because the raw file name is otherwise incomprehensible
61
+
62
+ Attributes
63
+ ----------
64
+ dir_name: str
65
+ name of starting directory
66
+ extension: list of str
67
+ extensions of files to be listed in widget
68
+
69
+ Methods
70
+ -------
71
+ get_directory
72
+ set_options
73
+ get_file_name
74
+
75
+ Example
76
+ -------
77
+ >>from google.colab import drive
78
+ >>drive.mount("/content/drive")
79
+ >>file_list = pyTEMlib.file_tools.FileWidget()
80
+ next code cell:
81
+ >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)
82
+
83
+ """
84
+
85
+ def __init__(self, dir_name=None, extension=['*'], sum_frames=False):
86
+ self.save_path = False
87
+ self.dir_dictionary = {}
88
+ self.dir_list = ['.', '..']
89
+ self.display_list = ['.', '..']
90
+ self.sum_frames = sum_frames
91
+
92
+ self.dir_name = '.'
93
+ if dir_name is None:
94
+ self.dir_name = get_last_path()
95
+ self.save_path = True
96
+ elif os.path.isdir(dir_name):
97
+ self.dir_name = dir_name
98
+
99
+ self.get_directory(self.dir_name)
100
+ self.dir_list = ['.']
101
+ self.extensions = extension
102
+ self.file_name = ''
103
+ self.datasets = {}
104
+ self.dataset = None
105
+
106
+ self.select_files = widgets.Select(
107
+ options=self.dir_list,
108
+ value=self.dir_list[0],
109
+ description='Select file:',
110
+ disabled=False,
111
+ rows=10,
112
+ layout=widgets.Layout(width='70%')
113
+ )
114
+
115
+ select_button = widgets.Button(description='Select Main',
116
+ layout=widgets.Layout(width='auto', grid_area='header'),
117
+ style=widgets.ButtonStyle(button_color='lightblue'))
118
+
119
+ add_button = widgets.Button(description='Add',
120
+ layout=widgets.Layout(width='auto', grid_area='header'),
121
+ style=widgets.ButtonStyle(button_color='lightblue'))
122
+
123
+ self.path_choice = widgets.Dropdown(options=['None'],
124
+ value='None',
125
+ description='directory:',
126
+ disabled=False,
127
+ button_style='',
128
+ layout=widgets.Layout(width='90%'))
129
+ self.dataset_list = ['None']
130
+ self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,
131
+ value=self.dataset_list[0],
132
+ description='loaded datasets:',
133
+ disabled=False,
134
+ button_style='')
135
+
136
+ self.set_options()
137
+ ui = widgets.VBox([self.path_choice, self.select_files, widgets.HBox([select_button, add_button,
138
+ self.loaded_datasets])])
139
+ display(ui)
140
+
141
+ self.select_files.observe(self.get_file_name, names='value')
142
+ self.path_choice.observe(self.set_dir, names='value')
143
+
144
+ select_button.on_click(self.select_main)
145
+ add_button.on_click(self.add_dataset)
146
+ self.loaded_datasets.observe(self.select_dataset)
147
+
148
+ def select_main(self, value=0):
149
+ self.datasets = {}
150
+ #self.loaded_datasets.value = self.dataset_list[0]
151
+ self.dataset_list = []
152
+ self.datasets = open_file(self.file_name, sum_frames=self.sum_frames)
153
+ self.dataset_list = []
154
+ for key in self.datasets.keys():
155
+ self.dataset_list.append(f'{key}: {self.datasets[key].title}')
156
+ self.loaded_datasets.options = self.dataset_list
157
+ self.loaded_datasets.value = self.dataset_list[0]
158
+ self.debug = 5
159
+ self.dataset = self.datasets[list(self.datasets.keys())[0]]
160
+ self.debug = 6
161
+ self.selected_dataset = self.dataset
162
+
163
+ def add_dataset(self, value=0):
164
+ key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')
165
+ self.dataset_list.append(f'{key}: {self.datasets[key].title}')
166
+ self.loaded_datasets.options = self.dataset_list
167
+ self.loaded_datasets.value = self.dataset_list[-1]
168
+
169
+ def get_directory(self, directory=None):
170
+ self.dir_name = directory
171
+ self.dir_dictionary = {}
172
+ self.dir_list = []
173
+ self.dir_list = ['.', '..'] + os.listdir(directory)
174
+
175
+ def set_dir(self, value=0):
176
+ self.dir_name = self.path_choice.value
177
+ self.select_files.index = 0
178
+ self.set_options()
179
+
180
+ def select_dataset(self, value=0):
181
+
182
+ key = self.loaded_datasets.value.split(':')[0]
183
+ if key != 'None':
184
+ self.selected_dataset = self.datasets[key]
185
+ self.selected_key = key
186
+
187
+ def set_options(self):
188
+ self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))
189
+ dir_list = os.listdir(self.dir_name)
190
+ file_dict = update_directory_list(self.dir_name)
191
+
192
+ sort = np.argsort(file_dict['directory_list'])
193
+ self.dir_list = ['.', '..']
194
+ self.display_list = ['.', '..']
195
+ for j in sort:
196
+ self.display_list.append(f" * {file_dict['directory_list'][j]}")
197
+ self.dir_list.append(file_dict['directory_list'][j])
198
+
199
+ sort = np.argsort(file_dict['display_file_list'])
200
+
201
+ for i, j in enumerate(sort):
202
+ if '--' in dir_list[j]:
203
+ self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
204
+ else:
205
+ self.display_list.append(f" {i:3} {file_dict['display_file_list'][j]}")
206
+ self.dir_list.append(file_dict['file_list'][j])
207
+
208
+ self.dir_label = os.path.split(self.dir_name)[-1] + ':'
209
+ self.select_files.options = self.display_list
210
+
211
+ path = self.dir_name
212
+ old_path = ' '
213
+ path_list = []
214
+ while path != old_path:
215
+ path_list.append(path)
216
+ old_path = path
217
+ path = os.path.split(path)[0]
218
+ self.path_choice.options = path_list
219
+ self.path_choice.value = path_list[0]
220
+
221
+ def get_file_name(self, b):
222
+
223
+ if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
224
+ self.set_options()
225
+
226
+ elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):
227
+ self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])
228
+
229
+
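
A minimal notebook sketch of driving FileWidget, assuming the working directory and extension filter shown here (both hypothetical):

    import pyTEMlib.file_tools as ft

    file_widget = ft.FileWidget(dir_name='.', extension=['hf5'])
    # after a file is selected in the widget, in the next cell:
    datasets = ft.open_file(file_widget.file_name)
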
230
+ class ChooseDataset(object):
231
+ """Widget to select dataset object """
232
+
233
+ def __init__(self, input_object, show_dialog=True):
234
+ self.datasets = None
235
+ if isinstance(input_object, sidpy.Dataset):
236
+ if isinstance(input_object.h5_dataset, h5py.Dataset):
237
+ self.current_channel = input_object.h5_dataset.parent
238
+ elif isinstance(input_object, h5py.Group):
239
+ self.current_channel = input_object
240
+ elif isinstance(input_object, h5py.Dataset):
241
+ self.current_channel = input_object.parent
242
+ elif isinstance(input_object, dict):
243
+ self.datasets = input_object
244
+ else:
245
+ raise ValueError('Need hdf5 group or sidpy Dataset to determine image choices')
246
+ self.dataset_names = []
247
+ self.dataset_list = []
248
+ self.dataset_type = None
249
+ self.dataset = None
250
+ if not isinstance(self.datasets, dict):
251
+ self.reader = SciFiReaders.NSIDReader(self.current_channel.file.filename)
252
+ else:
253
+ self.reader = None
254
+ self.get_dataset_list()
255
+ self.select_image = widgets.Dropdown(options=self.dataset_list,
256
+ value=self.dataset_list[0],
257
+ description='select dataset:',
258
+ disabled=False,
259
+ button_style='')
260
+ if show_dialog:
261
+ display(self.select_image)
262
+
263
+ self.select_image.observe(self.set_dataset, names='value')
264
+ self.set_dataset(0)
265
+ self.select_image.index = (len(self.dataset_names) - 1)
266
+
267
+ def get_dataset_list(self):
268
+ """ Get by Log number sorted list of datasets"""
269
+ if not isinstance(self.datasets, dict):
270
+ dataset_list = self.reader.read()
271
+ self.datasets = {}
272
+ for dataset in dataset_list:
273
+ self.datasets[dataset.title] = dataset
274
+ order = []
275
+ keys = []
276
+ for title, dset in self.datasets.items():
277
+ if isinstance(dset, sidpy.Dataset):
278
+ if self.dataset_type is None or dset.data_type == self.data_type:
279
+ if 'Log' in title:
280
+ order.append(2)
281
+ else:
282
+ order.append(0)
283
+ keys.append(title)
284
+ for index in np.argsort(order):
285
+ self.dataset_names.append(keys[index])
286
+ self.dataset_list.append(keys[index] + ': ' + self.datasets[keys[index]].title)
287
+
288
+ def set_dataset(self, b):
289
+ index = self.select_image.index
290
+ self.key = self.dataset_names[index]
291
+ self.dataset = self.datasets[self.key]
292
+ self.dataset.title = self.dataset.title.split('/')[-1]
293
+ self.dataset.title = self.dataset.title.split('/')[-1]
294
+
295
+
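
A sketch of picking one dataset out of a dictionary returned by open_file() (the datasets variable is assumed from a previous cell):

    import pyTEMlib.file_tools as ft

    chooser = ft.ChooseDataset(datasets)   # renders a dropdown of dataset keys
    # after the user picks an entry, the selection is available as:
    dataset = chooser.dataset
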
296
+ def add_to_dict(file_dict, name):
297
+ full_name = os.path.join(file_dict['directory'], name)
298
+ basename, extension = os.path.splitext(name)
299
+ size = os.path.getsize(full_name) * 2 ** -20
300
+ display_name = name
301
+ if len(extension) == 0:
302
+ display_file_list = f' {name} - {size:.1f} MB'
303
+ elif extension[0] == 'hf5':
304
+ if extension in ['.hf5']:
305
+ display_file_list = f" {name} - {size:.1f} MB"
306
+ elif extension in ['.h5', '.ndata']:
307
+ try:
308
+ reader = SciFiReaders.NionReader(full_name)
309
+ dataset_nion = reader.read()
310
+ display_name = dataset_nion.title
311
+ display_file_list = f" {display_name}{extension} - {size:.1f} MB"
312
+ except:
313
+ display_file_list = f" {name} - {size:.1f} MB"
314
+ else:
315
+ display_file_list = f' {name} - {size:.1f} MB'
316
+ file_dict[name] = {'display_string': display_file_list, 'basename': basename, 'extension': extension,
317
+ 'size': size, 'display_name': display_name}
318
+
319
+
320
+ def update_directory_list(directory_name):
321
+ dir_list = os.listdir(directory_name)
322
+
323
+ if '.pyTEMlib.files.pkl' in dir_list:
324
+ with open(os.path.join(directory_name, '.pyTEMlib.files.pkl'), 'rb') as f:
325
+ file_dict = pickle.load(f)
326
+ if directory_name != file_dict['directory']:
327
+ print('directory moved since last time read')
328
+ file_dict['directory'] = directory_name
329
+ dir_list.remove('.pyTEMlib.files.pkl')
330
+ else:
331
+ file_dict = {'directory': directory_name}
332
+
333
+ # add new files
334
+ file_dict['file_list'] = []
335
+ file_dict['display_file_list'] = []
336
+ file_dict['directory_list'] = []
337
+
338
+ for name in dir_list:
339
+ if os.path.isfile(os.path.join(file_dict['directory'], name)):
340
+ if name not in file_dict:
341
+ add_to_dict(file_dict, name)
342
+ file_dict['file_list'].append(name)
343
+ file_dict['display_file_list'].append(file_dict[name]['display_string'])
344
+ else:
345
+ file_dict['directory_list'].append(name)
346
+ remove_item = []
347
+
348
+ # delete items of deleted files
349
+ save_pickle = False
350
+
351
+ for name in file_dict.keys():
352
+ if name not in dir_list and name not in ['directory', 'file_list', 'directory_list', 'display_file_list']:
353
+ remove_item.append(name)
354
+ else:
355
+ if 'extension' in file_dict[name]:
356
+ save_pickle = True
357
+ for item in remove_item:
358
+ file_dict.pop(item)
359
+
360
+ if save_pickle:
361
+ with open(os.path.join(file_dict['directory'], '.pyTEMlib.files.pkl'), 'wb') as f:
362
+ pickle.dump(file_dict, f)
363
+ return file_dict
364
+
365
+
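
update_directory_list() caches per-file display strings in a hidden .pyTEMlib.files.pkl pickle; a quick sketch of inspecting that cache (directory path hypothetical):

    import pyTEMlib.file_tools as ft

    file_dict = ft.update_directory_list('.')
    print(file_dict['directory_list'])       # sub-directories
    print(file_dict['display_file_list'])    # files annotated with their size in MB
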
366
+ ####
367
+ # General Open and Save Methods
368
+ ####
369
+
370
+ def get_last_path():
371
+ """Returns the path of the file last opened"""
372
+ try:
373
+ fp = open(config_path + '\\path.txt', 'r')
374
+ path = fp.read()
375
+ fp.close()
376
+ except IOError:
377
+ path = ''
378
+
379
+ if len(path) < 2:
380
+ path = '.'
381
+ else:
382
+ if not os.path.exists(path):
383
+ path = '.'
384
+ return path
385
+
386
+
387
+ def save_path(filename):
388
+ """Save path of last opened file"""
389
+
390
+ if len(filename) > 1:
391
+ fp = open(config_path + '\\path.txt', 'w')
392
+ path, fname = os.path.split(filename)
393
+ fp.write(path)
394
+ fp.close()
395
+ else:
396
+ path = '.'
397
+ return path
398
+
399
+
400
+ if Qt_available:
401
+ def get_qt_app():
402
+ """
403
+ will start QT Application if not running yet
404
+
405
+ :returns: QApplication
406
+
407
+ """
408
+
409
+ # start qt event loop
410
+ _instance = QtWidgets.QApplication.instance()
411
+ if not _instance:
412
+ # print('not_instance')
413
+ _instance = QtWidgets.QApplication([])
414
+
415
+ return _instance
416
+
417
+
418
+ def open_file_dialog_qt(file_types=None): # , multiple_files=False):
419
+ """Opens a File dialog which is used in open_file() function
420
+
421
+ This function uses pyQt5.
422
+ The app of the GUI has to be running for QT. Tkinter does not run on Macs at this point in time.
423
+ In jupyter notebooks use %gui Qt early in the notebook.
424
+
425
+ The file looks first for a path.txt file for the last directory you used.
426
+
427
+ Parameters
428
+ ----------
429
+ file_types : string
430
+ file type filter in the form of '*.hf5'
431
+
432
+
433
+ Returns
434
+ -------
435
+ filename : string
436
+ full filename with absolute path and extension as a string
437
+
438
+ Example
439
+ -------
440
+ >> import file_tools as ft
441
+ >> filename = ft.openfile_dialog()
442
+ >> print(filename)
443
+
444
+ """
446
+
447
+ # determine file types by extension
448
+ if file_types is None:
449
+ file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
450
+ 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
451
+ elif file_types == 'pyNSID':
452
+ file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \
453
+ 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
454
+
455
+ # file_types = [("TEM files",["*.dm*","*.hf*","*.ndata" ]),("pyNSID files","*.hf5"),("DM files","*.dm*"),
456
+ # ("Nion files",["*.h5","*.ndata"]),("all files","*.*")]
457
+
458
+ # Determine last path used
459
+ path = get_last_path()
460
+
461
+ if Qt_available:
462
+ _ = get_qt_app()
463
+ filename = sidpy.io.interface_utils.openfile_dialog_QT(file_types=file_types, file_path=path)
464
+ save_path(filename)
465
+ return filename
466
+
467
+
468
+ def save_file_dialog_qt(file_types=None): # , multiple_files=False):
469
+ """Opens a File dialog which is used in open_file() function
470
+
471
+ This function uses pyQt5.
472
+ The app of the GUI has to be running for QT. Tkinter does not run on Macs at this point in time.
473
+ In jupyter notebooks use %gui Qt early in the notebook.
474
+
475
+ The file looks first for a path.txt file for the last directory you used.
476
+
477
+ Parameters
478
+ ----------
479
+ file_types : string
480
+ file type filter in the form of '*.hf5'
481
+
482
+
483
+ Returns
484
+ -------
485
+ filename : string
486
+ full filename with absolute path and extension as a string
487
+
488
+ Example
489
+ -------
490
+ >> import file_tools as ft
491
+ >> filename = ft.openfile_dialog()
492
+ >> print(filename)
493
+
494
+ """
496
+
497
+ # determine file types by extension
498
+ if file_types is None:
499
+ file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \
500
+ 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
501
+ elif file_types == 'TEM':
502
+ file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \
503
+ 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'
504
+
505
+ # Determine last path used
506
+ path = get_last_path()
507
+
508
+ if Qt_available:
509
+ _ = get_qt_app()
510
+ filename = sidpy.io.interface_utils.savefile_dialog(file_types=file_types, file_path=path)
511
+ save_path(filename)
512
+ return filename
513
+
514
+
515
+ def save_dataset(dataset, filename=None, h5_group=None):
516
+ """ Saves a dataset to a file in pyNSID format
517
+ Parameters
518
+ ----------
519
+ dataset: sidpy.Dataset
520
+ the data
521
+ filename: str
522
+ name of the file to be written; if filename is None, a QT save file dialog will open
523
+ h5_group: hd5py.Group
524
+ not used yet
525
+ """
526
+ if filename is None:
527
+ filename = save_file_dialog_qt()
528
+ h5_filename = get_h5_filename(filename)
529
+ h5_file = h5py.File(h5_filename, mode='a')
530
+ path, file_name = os.path.split(filename)
531
+ basename, _ = os.path.splitext(file_name)
532
+
533
+ if isinstance(dataset, dict):
534
+ h5_group = save_dataset_dictionary(h5_file, dataset)
535
+ return h5_group
536
+
537
+ elif isinstance(dataset, sidpy.Dataset):
538
+ h5_dataset = save_single_dataset(h5_file, dataset, h5_group=h5_group)
539
+ return h5_dataset.parent
540
+ else:
541
+ raise TypeError('Only sidpy.datasets or dictionaries can be saved with pyTEMlib')
542
+
543
+
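
A sketch of the save path: save_dataset() accepts either a single sidpy.Dataset or the dictionary that open_file() returns (file names hypothetical):

    import pyTEMlib.file_tools as ft

    datasets = ft.open_file('spectrum.dm3')
    h5_group = ft.save_dataset(datasets, filename='spectrum.hf5')
    h5_group.file.close()
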
544
+ def save_single_dataset(h5_file, dataset, h5_group=None):
545
+ if h5_group is None:
546
+ h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
547
+ h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_measurement_group, 'Channel_')
548
+
549
+ elif isinstance(h5_group, str):
550
+ if h5_group not in h5_file:
551
+ h5_group = h5_file.create_group(h5_group)
552
+ else:
553
+ if h5_group[-1] == '/':
554
+ h5_group = h5_group[:-1]
555
+
556
+ channel = h5_group.split('/')[-1]
557
+ h5_measurement_group = h5_group[:-len(channel)]
558
+ h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_group, 'Channel_')
559
+ else:
560
+ raise ValueError('h5_group needs to be string or None')
561
+
562
+ h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)
563
+ dataset.h5_dataset = h5_dataset
564
+ h5_dataset.file.flush()
565
+ return h5_dataset
566
+
567
+
568
+ def save_dataset_dictionary(h5_file, datasets):
569
+ h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')
570
+ for key, dataset in datasets.items():
571
+ if key[-1] == '/':
572
+ key = key[:-1]
573
+ if isinstance(dataset, sidpy.Dataset):
574
+ h5_group = h5_measurement_group.create_group(key)
575
+ h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)
576
+ dataset.h5_dataset = h5_dataset
577
+ h5_dataset.file.flush()
578
+ elif isinstance(dataset, dict):
579
+ sidpy.hdf.hdf_utils.write_dict_to_h5_group(h5_measurement_group, dataset, key)
580
+ else:
581
+ print('could not save item ', key, 'of dataset dictionary')
582
+ return h5_measurement_group
583
+
584
+
585
+ def h5_group_to_dict(group, group_dict={}):
586
+ if not isinstance(group, h5py.Group):
587
+ raise TypeError('we need a h5py group to read from')
588
+ if not isinstance(group_dict, dict):
589
+ raise TypeError('group_dict needs to be a python dictionary')
590
+
591
+ group_dict[group.name.split('/')[-1]] = dict(group.attrs)
592
+ for key in group.keys():
593
+ h5_group_to_dict(group[key], group_dict[group.name.split('/')[-1]])
594
+ return group_dict
595
+
596
+
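
h5_group_to_dict() collects the attrs of a group and of all its sub-groups, and raises on h5py.Datasets, so it is meant for metadata-only groups; a sketch (file name and group path hypothetical):

    import h5py
    import pyTEMlib.file_tools as ft

    with h5py.File('spectrum.hf5', 'r') as h5_file:
        meta = ft.h5_group_to_dict(h5_file['Measurement_000/metadata'])
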
597
+ def open_file(filename=None, h5_group=None, write_hdf_file=False, sum_frames=False): # save_file=False,
598
+ """Opens a file if the extension is .hf5, .ndata, .dm3 or .dm4
599
+
600
+ If no filename is provided the QT open_file windows opens (if QT_available==True)
601
+ Everything will be stored in a NSID style hf5 file.
602
+ Subroutines used:
603
+ - NSIDReader
604
+ - nsid.write_
605
+ - get_main_tags
606
+ - get_additional tags
607
+
608
+ Parameters
609
+ ----------
610
+ filename: str
611
+ name of file to be opened, if filename is None, a QT file dialog will try to open
612
+ h5_group: hd5py.Group
613
+ not used yet #TODO: provide hook for usage of external chosen group
614
+ write_hdf_file: bool
615
+ set to false so that sidpy dataset will not be written to hf5-file automatically
616
+
617
+ Returns
618
+ -------
619
+ sidpy.Dataset
620
+ sidpy dataset with location of hdf5 dataset as attribute
621
+
622
+ """
623
+ if filename is None:
624
+ selected_file = open_file_dialog_qt()
625
+ filename = selected_file
626
+
627
+ else:
628
+ if not isinstance(filename, str):
629
+ raise TypeError('filename must be a non-empty string or None (to open a QT file dialog)')
630
+ elif filename == '':
631
+ raise TypeError('filename must be a non-empty string or None (to open a QT file dialog)')
632
+
633
+ path, file_name = os.path.split(filename)
634
+ basename, extension = os.path.splitext(file_name)
635
+
636
+ if extension == '.hf5':
637
+ reader = SciFiReaders.NSIDReader(filename)
638
+ datasets = reader.read()
639
+ if len(datasets) < 1:
640
+ print('no hdf5 dataset found in file')
641
+ return {}
642
+ else:
643
+ dataset_dict = {}
644
+ for index, dataset in enumerate(datasets):
645
+ title = dataset.title.split('/')[2]
646
+ dataset.title = dataset.title.split('/')[-1]
647
+ dataset_dict[title] = dataset
648
+ if index == 0:
649
+ file = datasets[0].h5_dataset.file
650
+ master_group = datasets[0].h5_dataset.parent.parent.parent
651
+ for key in master_group.keys():
652
+ if key not in dataset_dict:
653
+ dataset_dict[key] = h5_group_to_dict(master_group[key])
654
+ if not write_hdf_file:
655
+ file.close()
656
+ return dataset_dict
657
+ elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi', '.edaxh5']:
658
+ # tags = open_file(filename)
659
+ if extension in ['.dm3', '.dm4']:
660
+ reader = SciFiReaders.DMReader(filename)
661
+ elif extension in ['.emi']:
662
+ try:
663
+ import hyperspy.api as hs
664
+ s = hs.load(filename)
665
+ dataset_dict = {}
666
+ spectrum_number = 0
667
+ if not isinstance(s, list):
668
+ s = [s]
669
+ for index, datum in enumerate(s):
670
+ dset = SciFiReaders.convert_hyperspy(datum)
671
+ if datum.data.ndim == 1:
672
+ dset.title = dset.title + f'_{spectrum_number}_Spectrum'
673
+ spectrum_number += 1
674
+ elif datum.data.ndim == 3:
675
+ dset.title = dset.title + '_SI'
676
+ dset = dset.T
677
+ dset.title = dset.title[11:]
678
+ dataset_dict[f'Channel_{index:03d}'] = dset
679
+ return dataset_dict
680
+ except ImportError:
681
+ print('This file type needs hyperspy to be installed to be able to be read')
682
+ return
683
+ elif extension == '.emd':
684
+ reader = SciFiReaders.EMDReader(filename, sum_frames=sum_frames)
685
+
686
+ elif 'edax' in extension.lower():
687
+ if 'h5' in extension:
688
+ reader = SciFiReaders.EDAXReader(filename)
689
+
690
+ elif extension in ['.ndata', '.h5']:
691
+ reader = SciFiReaders.NionReader(filename)
692
+
693
+ else:
694
+ raise NotImplementedError('extension not supported')
695
+
696
+ path, file_name = os.path.split(filename)
697
+ basename, _ = os.path.splitext(file_name)
698
+ if extension != '.emi':
699
+ dset = reader.read()
700
+
701
+ if extension in ['.dm3', '.dm4']:
702
+ title = (basename.strip().replace('-', '_')).split('/')[-1]
703
+ if not isinstance(dset, dict):
704
+ print('Please use new SciFiReaders Package for full functionality')
705
+ if isinstance(dset, sidpy.Dataset):
706
+ dset = [dset]
707
+
708
+ if isinstance(dset, dict):
709
+ dataset_dict = dset
710
+
711
+ elif isinstance(dset, list):
712
+ if len(dset) < 1:
713
+ print('no dataset found in file')
714
+ return {}
715
+ else:
716
+ if 'PageSetup' in dset[0].original_metadata:
717
+ del dset[0].original_metadata['PageSetup']
718
+ dset[0].original_metadata['original_title'] = title
719
+ dataset_dict = {}
720
+ for index, dataset in enumerate(dset):
721
+ if extension == '.emi':
722
+ if 'experiment' in dataset.metadata:
723
+ if 'detector' in dataset.metadata['experiment']:
724
+ dataset.title = dataset.metadata['experiment']['detector']
725
+ dataset.filename = basename.strip()
726
+ # read_essential_metadata(dataset)
727
+ dataset.metadata['filename'] = filename
728
+ dataset_dict[f'Channel_{index:03}'] = dataset
729
+ else:
730
+ dset.filename = basename.strip().replace('-', '_')
731
+ read_essential_metadata(dset)
732
+ dset.metadata['filename'] = filename
733
+ dataset_dict = {'Channel_000': dset}
734
+
735
+ # Temporary fix for dual EELS spectra in dm files
736
+ # Todo: Fix in SciFiReaders
737
+ for dset in dataset_dict.values():
738
+ if 'single_exposure_time' in dset.metadata['experiment']:
739
+ dset.metadata['experiment']['exposure_time'] = dset.metadata['experiment']['number_of_frames'] * \
740
+ dset.metadata['experiment']['single_exposure_time']
741
+ if write_hdf_file:
742
+ h5_master_group = save_dataset(dataset_dict, filename=filename)
743
+
744
+ save_path(filename)
745
+ return dataset_dict
746
+ else:
747
+ print('file type not handled yet.')
748
+ return
749
+
750
+
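
The central API change from 0.2020.11.1: open_file() now returns a dictionary of sidpy.Datasets keyed by channel instead of a single dataset; a sketch (file name hypothetical):

    import pyTEMlib.file_tools as ft

    datasets = ft.open_file('image.dm3')
    for key, dataset in datasets.items():
        print(key, dataset.title, dataset.data_type)
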
751
+ ################################################################
752
+ # Read Functions
753
+ #################################################################
754
+
755
+ def read_essential_metadata(dataset):
756
+ """Updates dataset.metadata['experiment'] with essential information read from original metadata
757
+
758
+ This depends on whether it is originally a nion or a dm3 file
759
+ """
760
+ if not isinstance(dataset, sidpy.Dataset):
761
+ raise TypeError("we need a sidpy.Dataset")
762
+ experiment_dictionary = {}
763
+ if 'metadata' in dataset.original_metadata:
764
+ if 'hardware_source' in dataset.original_metadata['metadata']:
765
+ experiment_dictionary = read_nion_image_info(dataset.original_metadata)
766
+ if 'DM' in dataset.original_metadata:
767
+ experiment_dictionary = read_dm3_info(dataset.original_metadata)
768
+ if 'experiment' not in dataset.metadata:
769
+ dataset.metadata['experiment'] = {}
770
+
771
+ dataset.metadata['experiment'].update(experiment_dictionary)
772
+
773
+
774
+ def read_dm3_info(original_metadata):
775
+ """Read essential parameter from original_metadata originating from a dm3 file"""
776
+ if not isinstance(original_metadata, dict):
777
+ raise TypeError('We need a dictionary to read')
778
+
779
+ if 'DM' not in original_metadata:
780
+ return {}
781
+ if 'ImageTags' not in original_metadata:
782
+ return {}
783
+ exp_dictionary = original_metadata['ImageTags']
784
+ experiment = {}
785
+ if 'EELS' in exp_dictionary:
786
+ if 'Acquisition' in exp_dictionary['EELS']:
787
+ for key, item in exp_dictionary['EELS']['Acquisition'].items():
788
+ if 'Exposure' in key:
789
+ _, units = key.split('(')
790
+ if units[:-1] == 's':
791
+ experiment['single_exposure_time'] = item
792
+ if 'Integration' in key:
793
+ _, units = key.split('(')
794
+ if units[:-1] == 's':
795
+ experiment['exposure_time'] = item
796
+ if 'frames' in key:
797
+ experiment['number_of_frames'] = item
798
+
799
+ if 'Experimental Conditions' in exp_dictionary['EELS']:
800
+ for key, item in exp_dictionary['EELS']['Experimental Conditions'].items():
801
+ if 'Convergence' in key:
802
+ experiment['convergence_angle'] = item
803
+ if 'Collection' in key:
804
+ # print(item)
805
+ # for val in item.values():
806
+ experiment['collection_angle'] = item
807
+ if 'number_of_frames' not in experiment:
808
+ experiment['number_of_frames'] = 1
809
+ if 'exposure_time' not in experiment:
810
+ if 'single_exposure_time' in experiment:
811
+ experiment['exposure_time'] = experiment['number_of_frames'] * experiment['single_exposure_time']
812
+
813
+ else:
814
+ if 'Acquisition' in exp_dictionary:
815
+ if 'Parameters' in exp_dictionary['Acquisition']:
816
+ if 'High Level' in exp_dictionary['Acquisition']['Parameters']:
817
+ if 'Exposure (s)' in exp_dictionary['Acquisition']['Parameters']['High Level']:
818
+ experiment['exposure_time'] = exp_dictionary['Acquisition']['Parameters']['High Level'][
819
+ 'Exposure (s)']
820
+
821
+ if 'Microscope Info' in exp_dictionary:
822
+ if 'Microscope' in exp_dictionary['Microscope Info']:
823
+ experiment['microscope'] = exp_dictionary['Microscope Info']['Microscope']
824
+ if 'Voltage' in exp_dictionary['Microscope Info']:
825
+ experiment['acceleration_voltage'] = exp_dictionary['Microscope Info']['Voltage']
826
+
827
+ return experiment
828
+
829
+
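
A sketch of what read_dm3_info() extracts for an EELS acquisition; which keys appear depends on the tags recorded in the dm3 file, and the values below are purely illustrative:

    import pyTEMlib.file_tools as ft

    exp = ft.read_dm3_info(dataset.original_metadata)   # dataset assumed loaded from a dm3 file
    # e.g. {'single_exposure_time': 0.1, 'number_of_frames': 10,
    #       'exposure_time': 1.0, 'convergence_angle': 30.0, 'collection_angle': 50.0}
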
830
+ def read_nion_image_info(original_metadata):
831
+ """Read essential parameter from original_metadata originating from a dm3 file"""
832
+ if not isinstance(original_metadata, dict):
833
+ raise TypeError('We need a dictionary to read')
834
+ if 'metadata' not in original_metadata:
835
+ return {}
836
+ if 'hardware_source' not in original_metadata['metadata']:
837
+ return {}
838
+ if 'ImageScanned' not in original_metadata['metadata']['hardware_source']:
839
+ return {}
840
+
841
+ exp_dictionary = original_metadata['metadata']['hardware_source']['ImageScanned']
842
+ experiment = exp_dictionary
843
+ # print(exp_dictionary)
844
+ if 'autostem' in exp_dictionary:
845
+ pass
846
+
847
+
848
+ def get_h5_filename(fname):
849
+ """Determines file name of hdf5 file for newly converted data file"""
850
+
851
+ path, filename = os.path.split(fname)
852
+ basename, extension = os.path.splitext(filename)
853
+ h5_file_name_original = os.path.join(path, basename + '.hf5')
854
+ h5_file_name = h5_file_name_original
855
+
856
+ if os.path.exists(os.path.abspath(h5_file_name_original)):
857
+ count = 1
858
+ h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'
859
+ while os.path.exists(os.path.abspath(h5_file_name)):
860
+ count += 1
861
+ h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'
862
+
863
+ if h5_file_name != h5_file_name_original:
864
+ path, filename = os.path.split(h5_file_name)
865
+ print('Cannot overwrite file. Using: ', filename)
866
+ return str(h5_file_name)
867
+
868
+
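
get_h5_filename() never overwrites an earlier conversion; a sketch of the naming it produces, assuming data.hf5 already exists next to the hypothetical data.dm3:

    import pyTEMlib.file_tools as ft

    name = ft.get_h5_filename('data.dm3')
    # prints "Cannot overwrite file. Using:  data-1.hf5" and returns that path
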
869
+ def get_start_channel(h5_file):
870
+ """ Legacy for get start channel"""
871
+
872
+ DeprecationWarning('Depreciated: use function get_main_channel instead')
873
+ return get_main_channel(h5_file)
874
+
875
+
876
+ def get_main_channel(h5_file):
877
+ """Returns name of first channel group in hdf5-file"""
878
+
879
+ current_channel = None
880
+ if 'Measurement_000' in h5_file:
881
+ if 'Measurement_000/Channel_000' in h5_file:
882
+ current_channel = h5_file['Measurement_000/Channel_000']
883
+ return current_channel
884
+
885
+
886
+ def h5_tree(input_object):
887
+ """Just a wrapper for the sidpy function print_tree,
888
+
889
+ so that sidpy does not have to be loaded in notebook
890
+
891
+ """
892
+
893
+ if isinstance(input_object, sidpy.Dataset):
894
+ if not isinstance(input_object.h5_dataset, h5py.Dataset):
895
+ raise ValueError('sidpy dataset does not have an associated h5py dataset')
896
+ h5_file = input_object.h5_dataset.file
897
+ elif isinstance(input_object, h5py.Dataset):
898
+ h5_file = input_object.file
899
+ elif isinstance(input_object, (h5py.Group, h5py.File)):
900
+ h5_file = input_object
901
+ else:
902
+ raise TypeError('should be a h5py.object or sidpy Dataset')
903
+ sidpy.hdf_utils.print_tree(h5_file)
904
+
905
+
906
+ def log_results(h5_group, dataset=None, attributes=None):
907
+ """Log Results in hdf5-file
908
+
909
+ Saves either a sidpy.Dataset or dictionary in a hdf5-file.
910
+ The group for the result will consist of 'Log_' and a running index.
911
+ That group will be placed in h5_group.
912
+
913
+ Parameters
914
+ ----------
915
+ h5_group: hd5py.Group, or sidpy.Dataset
916
+ groups where result group are to be stored
917
+ dataset: sidpy.Dataset or None
918
+ sidpy dataset to be stored
919
+ attributes: dict
920
+ dictionary containing results that are not based on a sidpy.Dataset
921
+
922
+ Returns
923
+ -------
924
+ log_group: hd5py.Group
925
+ group in hdf5 file with results.
926
+
927
+ """
928
+ if isinstance(h5_group, sidpy.Dataset):
929
+ h5_group = h5_group.h5_dataset
930
+ if not isinstance(h5_group, h5py.Dataset):
931
+ raise TypeError('The h5_dataset attribute of the sidpy.Dataset is not a valid h5py.Dataset')
932
+ h5_group = h5_group.parent.parent
933
+
934
+ if not isinstance(h5_group, h5py.Group):
935
+ raise TypeError('Need a valid h5py.Group for logging results')
936
+
937
+ if dataset is None:
938
+ log_group = sidpy.hdf.prov_utils.create_indexed_group(h5_group, 'Log_')
939
+ else:
940
+ log_group = pyNSID.hdf_io.write_results(h5_group, dataset=dataset)
941
+ if hasattr(dataset, 'meta_data'):
942
+ if 'analysis' in dataset.meta_data:
943
+ log_group['analysis'] = dataset.meta_data['analysis']
944
+ if hasattr(dataset, 'structures'):
945
+ for structure in dataset.structures.values():
946
+ h5_add_crystal_structure(log_group, structure)
947
+
948
+ dataset.h5_dataset = log_group[dataset.title.replace('-', '_')][dataset.title.replace('-', '_')]
949
+ if attributes is not None:
950
+ for key, item in attributes.items():
951
+ if not isinstance(item, dict):
952
+ log_group[key] = attributes[key]
953
+ else:
954
+ log_group.create_group(key)
955
+ sidpy.hdf.hdf_utils.write_simple_attrs(log_group[key], attributes[key])
956
+ return log_group
957
+
958
+
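
A sketch of logging an analysis result next to its source data; result stands for any sidpy.Dataset produced by an analysis step, and the attribute values are hypothetical:

    import pyTEMlib.file_tools as ft

    # dataset was written to file before, so dataset.h5_dataset is a valid h5py.Dataset
    log_group = ft.log_results(dataset, dataset=result,
                               attributes={'method': 'background subtraction'})
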
959
+ def add_dataset_from_file(datasets, filename=None, key_name='Log', single_dataset=True):
960
+ """Add dataset to datasets dictionary
961
+
962
+ Parameters
963
+ ----------
964
+ datasets: dict
965
+ dictionary of datasets to which the new dataset is added
966
+ filename: str, default: None,
967
+ name of file to open; if None, a dialog will appear
968
+ key_name: str, default: 'Log'
969
+ name for key in dictionary with running number being added
970
+
971
+ Returns
972
+ -------
973
+ key_name: str
974
+ actual last used name of dictionary key
975
+ """
976
+
977
+ datasets2 = open_file(filename=filename)
978
+ first_dataset = datasets2[list(datasets2)[0]]
979
+ if isinstance(first_dataset, sidpy.Dataset):
980
+
981
+ index = 0
982
+ for key in datasets.keys():
983
+ if key_name in key:
984
+ if int(key[-3:]) >= index:
985
+ index = int(key[-3:])+1
986
+ if single_dataset:
987
+ datasets[key_name+f'_{index:03}'] = first_dataset
988
+ else:
989
+ for dataset in datasets2.values():
990
+ datasets[key_name+f'_{index:03}'] = dataset
991
+ index += 1
992
+ index -= 1
993
+ else:
994
+ return None
995
+
996
+ return f'{key_name}_{index:03}'
997
+
998
+
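
A sketch of appending the first dataset of another file to an existing dictionary; the returned string is the key under which it was stored (file name hypothetical):

    import pyTEMlib.file_tools as ft

    key = ft.add_dataset_from_file(datasets, 'reference.dm3', key_name='Channel')
    print(key, datasets[key].title)
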
999
+ # ##
1000
+ # Crystal Structure Read and Write
1001
+ # ##
1002
+ def read_poscar(file_name=None):
+     """
+     Open a POSCAR file from VASP.
+     If no file name is provided, an open-file dialog to select a POSCAR file appears.
+
+     Parameters
+     ----------
+     file_name: str
+         if None is provided an open file dialog will appear
+
+     Returns
+     -------
+     crystal: ase.Atoms
+         crystal structure in ase format
+     """
+
+     if file_name is None:
+         file_name = open_file_dialog_qt('POSCAR (POSCAR*.txt);;All files (*)')
+
+     # use the ase package to read the file
+     base = os.path.basename(file_name)
+     base_name = os.path.splitext(base)[0]
+     crystal = ase.io.read(file_name, format='vasp', parallel=False)
+
+     # store the file name as title in the info dictionary (not essential for further use)
+     crystal.info = {'title': base_name}
+     return crystal
+
+
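Illustrative call only; 'POSCAR_SrTiO3.txt' is a hypothetical file name:

    import pyTEMlib.file_tools as ft

    crystal = ft.read_poscar('POSCAR_SrTiO3.txt')
    print(crystal.get_chemical_formula(), crystal.info['title'])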
+ def read_cif(file_name=None, verbose=False):  # open file dialog to select cif file
+     """
+     Open a cif file.
+     If no file name is provided, an open-file dialog to select a cif file appears.
+
+     Parameters
+     ----------
+     file_name: str
+         if None is provided an open file dialog will appear
+     verbose: bool
+         if True, print the chemical formula of the opened structure
+
+     Returns
+     -------
+     crystal: ase.Atoms
+         crystal structure in ase format
+     """
+
+     if file_name is None:
+         file_name = open_file_dialog_qt('cif (*.cif);;All files (*)')
+     # use the ase package to read the file
+
+     base = os.path.basename(file_name)
+     base_name = os.path.splitext(base)[0]
+     crystal = ase.io.read(file_name, format='cif', store_tags=True, parallel=False)
+
+     # make sure the info dictionary exists and carries the title
+     if crystal.info is None:
+         crystal.info = {}
+     crystal.info.update({'title': base_name})
+     if verbose:
+         print('Opened cif file for', crystal.get_chemical_formula())
+
+     return crystal
+
+
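The cif reader works the same way; 'SrTiO3.cif' is a hypothetical file name, and store_tags=True means all cif tags end up in crystal.info next to 'title':

    import pyTEMlib.file_tools as ft

    crystal = ft.read_cif('SrTiO3.cif', verbose=True)
    print(crystal.cell.lengths())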
+ def h5_add_crystal_structure(h5_file, input_structure, name=None):
+     """Write crystal structure to NSID file"""
+
+     if isinstance(input_structure, ase.Atoms):
+         crystal_tags = pyTEMlib.crystal_tools.get_dictionary(input_structure)
+         if crystal_tags['metadata'] == {}:
+             crystal_tags['metadata'] = {'title': input_structure.get_chemical_formula()}
+     elif isinstance(input_structure, dict):
+         crystal_tags = input_structure
+     else:
+         raise TypeError('Need a dictionary or an ase.Atoms object with ase installed')
+
+     structure_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Structure_')
+
+     for key, item in crystal_tags.items():
+         if not isinstance(item, dict) and key != 'title':  # 'title' is written separately below
+             structure_group[key] = item
+
+     if 'base' in crystal_tags:
+         structure_group['relative_positions'] = crystal_tags['base']
+     if 'title' in crystal_tags:
+         structure_group['title'] = str(crystal_tags['title'])
+         structure_group['_' + crystal_tags['title']] = str(crystal_tags['title'])
+
+     # ToDo: Save all of info dictionary
+     if 'metadata' in crystal_tags:
+         structure_group.create_group('metadata')
+         sidpy.hdf.hdf_utils.write_simple_attrs(structure_group['metadata'], crystal_tags['metadata'])
+
+     h5_file.file.flush()
+     return structure_group
+
+
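A write example, a sketch passing the dictionary form (an ase.Atoms works as well); 'structures.h5' is a hypothetical file name and the tags below are a toy fcc aluminium cell:

    import h5py
    import numpy as np
    import pyTEMlib.file_tools as ft

    crystal_tags = {'unit_cell': np.identity(3) * 4.05,   # in Angstrom
                    'elements': ['Al', 'Al', 'Al', 'Al'],
                    'base': [[0., 0., 0.], [.5, .5, 0.], [.5, 0., .5], [0., .5, .5]],
                    'title': 'fcc_aluminium',
                    'metadata': {'source': 'toy example'}}
    with h5py.File('structures.h5', 'w') as h5_file:
        structure_group = ft.h5_add_crystal_structure(h5_file, crystal_tags)
        print(structure_group.name)  # /Structure_000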
+ def h5_add_to_structure(structure_group, crystal_tags):
+     """Add dictionary as structure group"""
+
+     for key in crystal_tags:
+         if key in structure_group.keys():
+             print(key, ' not written; use new name')
+         else:
+             structure_group[key] = crystal_tags[key]
+
+
+ def h5_get_crystal_structure(structure_group):
+     """Read crystal structure from NSID file
+
+     Any additional information will be read as a dictionary into the info attribute of the ase.Atoms object.
+
+     Parameters
+     ----------
+     structure_group: h5py.Group
+         location in the hdf5 file where the structure information is stored
+
+     Returns
+     -------
+     atoms: ase.Atoms
+         crystal structure in ase format
+     """
+
+     crystal_tags = {'unit_cell': structure_group['unit_cell'][()],
+                     'base': structure_group['relative_positions'][()],
+                     'title': structure_group['title'][()]}
+     if '2D' in structure_group:
+         crystal_tags['2D'] = structure_group['2D'][()]
+     elements = structure_group['elements'][()]
+     crystal_tags['elements'] = []
+     for e in elements:
+         crystal_tags['elements'].append(e.astype(str, copy=False))
+
+     atoms = pyTEMlib.crystal_tools.atoms_from_dictionary(crystal_tags)
+     if 'metadata' in structure_group:
+         atoms.info = sidpy.hdf.hdf_utils.h5_group_to_dict(structure_group)
+
+     if 'zone_axis' in structure_group:
+         atoms.info.update({'experiment': {'zone_axis': structure_group['zone_axis'][()]}})
+     # ToDo: Read all of info dictionary
+     return atoms
+
+
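Reading it back, continuing the hypothetical 'structures.h5' from the write example above:

    import h5py
    import pyTEMlib.file_tools as ft

    with h5py.File('structures.h5', 'r') as h5_file:
        atoms = ft.h5_get_crystal_structure(h5_file['Structure_000'])
        print(atoms.get_chemical_formula(), atoms.cell.lengths())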
+ ###############################################
+ # Support old pyTEM file format
+ ###############################################
+
+ def read_old_h5group(current_channel):
+     """Make a sidpy.Dataset from a pyUSID-style hdf5 group
+
+     Parameters
+     ----------
+     current_channel: h5py.Group
+         hdf5 group in the old pyTEMlib/pyUSID layout
+
+     Returns
+     -------
+     sid_dataset: sidpy.Dataset
+     """
+
+     dim_dir = []
+     if 'nDim_Data' in current_channel:
+         h5_dataset = current_channel['nDim_Data']
+         reader = pyNSID.NSIDReader(h5_dataset.file.filename)
+         dataset = reader.read(h5_dataset)
+         dataset.h5_file = current_channel.file
+         return dataset
+     elif 'Raw_Data' in current_channel:
+         if 'image_stack' in current_channel:
+             sid_dataset = sidpy.Dataset.from_array(np.swapaxes(current_channel['image_stack'][()], 2, 0))
+             dim_dir = ['SPATIAL', 'SPATIAL', 'TEMPORAL']
+         elif 'data' in current_channel:
+             sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
+             dim_dir = ['SPATIAL', 'SPATIAL']
+         else:
+             size_x = int(current_channel['spatial_size_x'][()])
+             size_y = int(current_channel['spatial_size_y'][()])
+             if 'spectral_size_x' in current_channel:
+                 size_s = int(current_channel['spectral_size_x'][()])
+             else:
+                 size_s = 0
+             data = np.reshape(current_channel['Raw_Data'][()], (size_x, size_y, size_s))
+             sid_dataset = sidpy.Dataset.from_array(data)
+             if size_x > 1:
+                 dim_dir.append('SPATIAL')
+             if size_y > 1:
+                 dim_dir.append('SPATIAL')
+             if size_s > 1:
+                 dim_dir.append('SPECTRAL')
+         sid_dataset.h5_dataset = current_channel['Raw_Data']
+
+     elif 'data' in current_channel:
+         sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])
+         dim_dir = ['SPATIAL', 'SPATIAL']
+         sid_dataset.h5_dataset = current_channel['data']
+     else:
+         return None
+
+     if 'SPATIAL' in dim_dir:
+         if 'SPECTRAL' in dim_dir:
+             sid_dataset.data_type = sidpy.DataType.SPECTRAL_IMAGE
+         elif 'TEMPORAL' in dim_dir:
+             sid_dataset.data_type = sidpy.DataType.IMAGE_STACK
+         else:
+             sid_dataset.data_type = sidpy.DataType.IMAGE
+     else:
+         sid_dataset.data_type = sidpy.DataType.SPECTRUM
+
+     sid_dataset.quantity = 'intensity'
+     sid_dataset.units = 'counts'
+     if 'analysis' in current_channel:
+         sid_dataset.source = current_channel['analysis'][()]
+
+     set_dimensions(sid_dataset, current_channel)
+
+     return sid_dataset
+
+
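A reading sketch for the legacy format, assuming an old-style file 'old_measurement.hf5' with the pyUSID Measurement_000/Channel_000 layout (both names hypothetical here):

    import h5py
    import pyTEMlib.file_tools as ft

    h5_file = h5py.File('old_measurement.hf5', 'r')
    dataset = ft.read_old_h5group(h5_file['Measurement_000/Channel_000'])
    print(dataset.data_type, dataset.shape)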
+ def set_dimensions(dset, current_channel):
+     """Attach the correct dimensions from the old pyTEMlib style.
+
+     Parameters
+     ----------
+     dset: sidpy.Dataset
+     current_channel: h5py.Group
+     """
+     dim = 0
+     if dset.data_type == sidpy.DataType.IMAGE_STACK:
+         dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]), name='frame',
+                                                 units='frame', quantity='stack',
+                                                 dimension_type='TEMPORAL'))
+         dim += 1
+     if 'IMAGE' in dset.data_type.name:
+         if 'spatial_scale_x' in current_channel:
+             scale_x = current_channel['spatial_scale_x'][()]
+         else:
+             scale_x = 1
+         if 'spatial_units' in current_channel:
+             units_x = current_channel['spatial_units'][()]
+             if isinstance(units_x, bytes):  # h5py returns stored strings as bytes
+                 units_x = units_x.decode()
+             if len(units_x) < 2:
+                 units_x = 'pixel'
+         else:
+             units_x = 'generic'
+         if 'spatial_scale_y' in current_channel:
+             scale_y = current_channel['spatial_scale_y'][()]
+         else:
+             scale_y = 1
+         dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_x, name='x',
+                                                 units=units_x, quantity='Length',
+                                                 dimension_type='SPATIAL'))
+         dim += 1
+         dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_y, name='y',
+                                                 units=units_x, quantity='Length',
+                                                 dimension_type='SPATIAL'))
+         dim += 1
+     if dset.data_type in [sidpy.DataType.SPECTRUM, sidpy.DataType.SPECTRAL_IMAGE]:
+         if 'spectral_scale_x' in current_channel:
+             scale_s = current_channel['spectral_scale_x'][()]
+         else:
+             scale_s = 1.0
+         if 'spectral_units_x' in current_channel:
+             units_s = current_channel['spectral_units_x'][()]
+             if isinstance(units_s, bytes):  # h5py returns stored strings as bytes
+                 units_s = units_s.decode()
+         else:
+             units_s = 'eV'
+
+         if 'spectral_offset_x' in current_channel:
+             offset = current_channel['spectral_offset_x'][()]
+         else:
+             offset = 0.0
+         dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_s + offset,
+                                                 name='energy',
+                                                 units=units_s,
+                                                 quantity='energy_loss',
+                                                 dimension_type='SPECTRAL'))
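To see what set_dimensions expects, a synthetic sketch with an in-memory h5py file; the channel keys follow the legacy names read above, and the default units ('eV') apply because no 'spectral_units_x' entry is written:

    import h5py
    import numpy as np
    import sidpy
    import pyTEMlib.file_tools as ft

    # fake an old-style channel that only carries a spectral axis
    h5_file = h5py.File('demo.hf5', 'w', driver='core', backing_store=False)
    channel = h5_file.create_group('Measurement_000/Channel_000')
    channel['spectral_scale_x'] = 0.25    # eV per channel
    channel['spectral_offset_x'] = 100.0  # eV

    spectrum = sidpy.Dataset.from_array(np.zeros(1024))
    spectrum.data_type = 'spectrum'
    ft.set_dimensions(spectrum, channel)
    print(spectrum.energy[:3])  # 100.0, 100.25, 100.5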