pyTEMlib 0.2020.11.0__py3-none-any.whl → 0.2024.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pyTEMlib might be problematic; see the registry's advisory page for more details.

Files changed (59)
  1. pyTEMlib/__init__.py +11 -11
  2. pyTEMlib/animation.py +631 -0
  3. pyTEMlib/atom_tools.py +240 -222
  4. pyTEMlib/config_dir.py +57 -29
  5. pyTEMlib/core_loss_widget.py +658 -0
  6. pyTEMlib/crystal_tools.py +1255 -0
  7. pyTEMlib/diffraction_plot.py +756 -0
  8. pyTEMlib/dynamic_scattering.py +293 -0
  9. pyTEMlib/eds_tools.py +609 -0
  10. pyTEMlib/eels_dialog.py +749 -486
  11. pyTEMlib/{interactive_eels.py → eels_dialog_utilities.py} +1199 -1524
  12. pyTEMlib/eels_tools.py +2031 -1731
  13. pyTEMlib/file_tools.py +1276 -491
  14. pyTEMlib/file_tools_qt.py +193 -0
  15. pyTEMlib/graph_tools.py +1166 -450
  16. pyTEMlib/graph_viz.py +449 -0
  17. pyTEMlib/image_dialog.py +158 -0
  18. pyTEMlib/image_dlg.py +146 -0
  19. pyTEMlib/image_tools.py +1399 -956
  20. pyTEMlib/info_widget.py +933 -0
  21. pyTEMlib/interactive_image.py +1 -0
  22. pyTEMlib/kinematic_scattering.py +1196 -0
  23. pyTEMlib/low_loss_widget.py +176 -0
  24. pyTEMlib/microscope.py +61 -78
  25. pyTEMlib/peak_dialog.py +1047 -350
  26. pyTEMlib/peak_dlg.py +286 -248
  27. pyTEMlib/probe_tools.py +653 -202
  28. pyTEMlib/sidpy_tools.py +153 -129
  29. pyTEMlib/simulation_tools.py +104 -87
  30. pyTEMlib/version.py +6 -3
  31. pyTEMlib/xrpa_x_sections.py +20972 -0
  32. {pyTEMlib-0.2020.11.0.dist-info → pyTEMlib-0.2024.8.4.dist-info}/LICENSE +21 -21
  33. pyTEMlib-0.2024.8.4.dist-info/METADATA +93 -0
  34. pyTEMlib-0.2024.8.4.dist-info/RECORD +37 -0
  35. {pyTEMlib-0.2020.11.0.dist-info → pyTEMlib-0.2024.8.4.dist-info}/WHEEL +6 -5
  36. {pyTEMlib-0.2020.11.0.dist-info → pyTEMlib-0.2024.8.4.dist-info}/entry_points.txt +0 -1
  37. pyTEMlib/KinsCat.py +0 -2685
  38. pyTEMlib/__version__.py +0 -2
  39. pyTEMlib/data/TEMlibrc +0 -68
  40. pyTEMlib/data/edges_db.csv +0 -189
  41. pyTEMlib/data/edges_db.pkl +0 -0
  42. pyTEMlib/data/fparam.txt +0 -103
  43. pyTEMlib/data/microscopes.csv +0 -7
  44. pyTEMlib/data/microscopes.xml +0 -167
  45. pyTEMlib/data/path.txt +0 -1
  46. pyTEMlib/defaults_parser.py +0 -86
  47. pyTEMlib/dm3_reader.py +0 -609
  48. pyTEMlib/edges_db.py +0 -76
  49. pyTEMlib/eels_dlg.py +0 -240
  50. pyTEMlib/hdf_utils.py +0 -481
  51. pyTEMlib/image_tools1.py +0 -2194
  52. pyTEMlib/info_dialog.py +0 -227
  53. pyTEMlib/info_dlg.py +0 -205
  54. pyTEMlib/nion_reader.py +0 -293
  55. pyTEMlib/nsi_reader.py +0 -165
  56. pyTEMlib/structure_tools.py +0 -316
  57. pyTEMlib-0.2020.11.0.dist-info/METADATA +0 -20
  58. pyTEMlib-0.2020.11.0.dist-info/RECORD +0 -42
  59. {pyTEMlib-0.2020.11.0.dist-info → pyTEMlib-0.2024.8.4.dist-info}/top_level.txt +0 -0
pyTEMlib/hdf_utils.py DELETED
@@ -1,481 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- Lower-level and simpler NSID-specific HDF5 utilities that facilitate
4
- higher-level data operations
5
-
6
- Created on Tue Aug 3 21:14:25 2020
7
-
8
- @author: Gerd Duscher, and Suhas Somnath
9
- """
10
- from __future__ import division, print_function, absolute_import, unicode_literals
11
- import sys
12
- import h5py
13
- import numpy as np
14
- import dask.array as da
15
-
16
- import sys
17
-
18
- import sidpy
19
- from sidpy.base.num_utils import contains_integers
20
- from sidpy.hdf.hdf_utils import get_attr, copy_dataset
21
- from sidpy.hdf import hdf_utils as hut
22
- from sidpy import Dimension
23
-
24
- if sys.version_info.major == 3:
25
- unicode = str
26
-
27
-
28
- def get_all_main(parent, verbose=False):
29
- """
30
- Simple function to recursively print the contents of an hdf5 group
31
- Parameters
32
- ----------
33
- parent : :class:`h5py.Group`
34
- HDF5 Group to search within
35
- verbose : bool, optional. Default = False
36
- If true, extra print statements (usually for debugging) are enabled
37
- Returns
38
- -------
39
- main_list : list of h5py.Dataset
40
- The datasets found in the file that meet the 'Main Data' criteria.
41
- """
42
- if not isinstance(parent, (h5py.Group, h5py.File)):
43
- raise TypeError('parent should be a h5py.File or h5py.Group object')
44
-
45
- main_list = list()
46
-
47
- def __check(name, obj):
48
- if verbose:
49
- print(name, obj)
50
- if isinstance(obj, h5py.Dataset):
51
- if verbose:
52
- print(name, 'is an HDF5 Dataset.')
53
- ismain = check_if_main(obj)
54
- if ismain:
55
- if verbose:
56
- print(name, 'is a `Main` dataset.')
57
- main_list.append(obj)
58
-
59
- if verbose:
60
- print('Checking the group {} for `Main` datasets.'.format(parent.name))
61
- parent.visititems(__check)
62
-
63
- return main_list
64
-
65
-
66
- def find_dataset(h5_group, dset_name):
67
- """
68
- Uses visit() to find all datasets with the desired name
69
- Parameters
70
- ----------
71
- h5_group : :class:`h5py.Group`
72
- Group to search within for the Dataset
73
- dset_name : str
74
- Name of the dataset to search for
75
- Returns
76
- -------
77
- datasets : list
78
- List of [Name, object] pairs corresponding to datasets that match `ds_name`.
79
- """
80
-
81
- # print 'Finding all instances of', ds_name
82
- datasets = []
83
-
84
- for obj in hut.find_dataset(h5_group, dset_name):
85
- datasets.append(obj)
86
-
87
- return datasets
88
-
89
-
90
- def validate_main_dset(h5_main, must_be_h5):
91
- """
92
- Checks to make sure that the provided object is a NSID main dataset
93
- Errors in parameters will result in Exceptions
94
- Parameters
95
- ----------
96
- h5_main : h5py.Dataset or numpy.ndarray or Dask.array.core.array
97
- object that represents the NSID main data
98
- must_be_h5 : bool
99
- Set to True if the expecting an h5py.Dataset object.
100
- Set to False if expecting a numpy.ndarray or Dask.array.core.array
101
- Returns
102
- -------
103
- """
104
- # Check that h5_main is a dataset
105
- if must_be_h5:
106
- if not isinstance(h5_main, h5py.Dataset):
107
- raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))
108
- else:
109
- if not isinstance(h5_main, (np.ndarray, da.core.Array)):
110
- raise TypeError('raw_data should either be a np.ndarray or a '
111
- 'da.core.Array')
112
-
113
- # Check dimensionality
114
- if len(h5_main.shape) != len(h5_main.dims):
115
- raise ValueError('Main data does not have full set of dimensional '
116
- 'scales. Provided object has shape: {} but only {} '
117
- 'dimensional scales'
118
- ''.format(h5_main.shape, len(h5_main.dims)))
119
-
120
-
121
- def validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):
122
- """
123
- Checks ancillary HDF5 datasets against shape of a main dataset.
124
- Errors in parameters will result in Exceptions
125
- Parameters
126
- ----------
127
- h5_inds : h5py.Dataset
128
- HDF5 dataset corresponding to the ancillary Indices dataset
129
- h5_vals : h5py.Dataset
130
- HDF5 dataset corresponding to the ancillary Values dataset
131
- main_shape : array-like
132
- Shape of the main dataset expressed as a tuple or similar
133
- is_spectroscopic : bool, Optional. Default = True
134
- set to True if ``dims`` correspond to Spectroscopic Dimensions.
135
- False otherwise.
136
- """
137
- if not isinstance(h5_inds, h5py.Dataset):
138
- raise TypeError('h5_inds must be a h5py.Dataset object')
139
- if not isinstance(h5_vals, h5py.Dataset):
140
- raise TypeError('h5_vals must be a h5py.Dataset object')
141
- if h5_inds.shape != h5_vals.shape:
142
- raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '
143
- 'shape'.format(h5_inds.shape, h5_vals.shape))
144
- if isinstance(main_shape, (list, tuple)):
145
- if not contains_integers(main_shape, min_val=1) or \
146
- len(main_shape) != 2:
147
- raise ValueError("'main_shape' must be a valid HDF5 dataset shape")
148
- else:
149
- raise TypeError('main_shape should be of the following types:'
150
- 'h5py.Dataset, tuple, or list. {} provided'
151
- ''.format(type(main_shape)))
152
-
153
- if h5_inds.shape[is_spectroscopic] != main_shape[is_spectroscopic]:
154
- raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '
155
- 'should be equal'.format(int(is_spectroscopic),
156
- h5_inds.shape, main_shape))
157
-
158
-
159
- def validate_dims_against_main(main_shape, dims, is_spectroscopic=True):
160
- """
161
- Checks Dimension objects against a given shape for main datasets.
162
- Errors in parameters will result in Exceptions
163
- Parameters
164
- ----------
165
- main_shape : array-like
166
- Tuple or list with the shape of the main data
167
- dims : iterable
168
- List of Dimension objects
169
- is_spectroscopic : bool, Optional. Default = True
170
- set to True if ``dims`` correspond to Spectroscopic Dimensions.
171
- False otherwise.
172
- """
173
- if not isinstance(main_shape, (list, tuple)):
174
- raise TypeError('main_shape should be a list or tuple. Provided object'
175
- ' was of type: {}'.format(type(main_shape)))
176
- if len(main_shape) != 2:
177
- raise ValueError('"main_shape" should be of length 2')
178
- contains_integers(main_shape, min_val=1)
179
-
180
- if isinstance(dims, Dimension):
181
- dims = [dims]
182
- elif not isinstance(dims, (list, tuple)):
183
- raise TypeError('"dims" must be a list or tuple of nsid.Dimension '
184
- 'objects. Provided object was of type: {}'
185
- ''.format(type(dims)))
186
- if not all([isinstance(obj, Dimension) for obj in dims]):
187
- raise TypeError('One or more objects in "dims" was not nsid.Dimension')
188
-
189
- if is_spectroscopic:
190
- main_dim = 1
191
- dim_category = 'Spectroscopic'
192
- else:
193
- main_dim = 0
194
- dim_category = 'Position'
195
-
196
- # TODO: This is where the dimension type will need to be taken into account
197
- lhs = main_shape[main_dim]
198
- rhs = np.product([len(x.values) for x in dims])
199
- if lhs != rhs:
200
- raise ValueError(dim_category +
201
- ' dimensions in main data of size: {} do not match '
202
- 'with product of values in provided Dimension objects'
203
- ': {}'.format(lhs, rhs))
204
-
205
-
206
- def check_if_main(h5_main, verbose=False):
207
- """
208
- Checks the input dataset to see if it has all the necessary
209
- features to be considered a Main dataset. This means it is
210
- dataset has dimensions of correct size and has the following attributes:
211
- * quantity
212
- * units
213
- * main_data_name
214
- * data_type
215
- * modality
216
- * source
217
- In addition, the shapes of the ancillary matrices should match with that of
218
- h5_main
219
- Parameters
220
- ----------
221
- h5_main : HDF5 Dataset
222
- Dataset of interest
223
- verbose : Boolean (Optional. Default = False)
224
- Whether or not to print statements
225
- Returns
226
- -------
227
- success : Boolean
228
- True if all tests pass
229
- """
230
- try:
231
- validate_main_dset(h5_main, True)
232
- except Exception as exep:
233
- if verbose:
234
- print(exep)
235
- return False
236
-
237
- # h5_name = h5_main.name.split('/')[-1]
238
- h5_group = h5_main.parent
239
-
240
- # success = True
241
-
242
- # Check for Datasets
243
-
244
- attrs_names = ['dimension_type', 'name', 'nsid_version', 'quantity', 'units', ]
245
- attr_success = []
246
- # Check for all required attributes in dataset
247
- main_attrs_names = ['quantity', 'units', 'main_data_name', 'data_type', 'modality', 'source']
248
- main_attr_success = np.all([att in h5_main.attrs for att in main_attrs_names])
249
- if verbose:
250
- print('All Attributes in dataset: ', main_attr_success)
251
- if not main_attr_success:
252
- if verbose:
253
- print('{} does not have the mandatory attributes'.format(h5_main.name))
254
- return False
255
-
256
- for attr_name in main_attrs_names:
257
- val = get_attr(h5_main, attr_name)
258
- if not isinstance(val, (str, unicode)):
259
- if verbose:
260
- print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))
261
- return False
262
-
263
- length_success = []
264
- dset_success = []
265
- # Check for Validity of Dimensional Scales
266
- for i, dimension in enumerate(h5_main.dims):
267
- # check for all required attributes
268
- h5_dim_dset = h5_group[dimension.label]
269
- attr_success.append(np.all([att in h5_dim_dset.attrs for att in attrs_names]))
270
- dset_success.append(np.all([attr_success, isinstance(h5_dim_dset, h5py.Dataset)]))
271
- # dimensional scale has to be 1D
272
- if len(h5_dim_dset.shape) == 1:
273
- # and of the same length as the shape of the dataset
274
- length_success.append(h5_main.shape[i] == h5_dim_dset.shape[0])
275
- else:
276
- length_success.append(False)
277
- # We have the list now and can get error messages according to which dataset is bad or not.
278
- if np.all([np.all(attr_success), np.all(length_success), np.all(dset_success)]):
279
- if verbose:
280
- print('Dimensions: All Attributes: ', np.all(attr_success))
281
- print('Dimensions: All Correct Length: ', np.all(length_success))
282
- print('Dimensions: All h5 Datasets: ', np.all(dset_success))
283
- else:
284
- print('length of dimension scale {length_success.index(False)} is wrong')
285
- print('attributes in dimension scale {attr_success.index(False)} are wrong')
286
- print('dimension scale {dset_success.index(False)} is not a dataset')
287
- return False
288
-
289
- return main_attr_success
290
-
291
-
292
- def link_as_main(h5_main, dim_dict):
293
- """
294
- Attaches datasets as h5 Dimensional Scales to `h5_main`
295
- Parameters
296
- ----------
297
- h5_main : h5py.Dataset
298
- N-dimensional Dataset which will have the references added as h5 Dimensional Scales
299
- dim_dict: dictionary with dimensional order as key and items are datasets to be used as h5 Dimensional Scales
300
-
301
- Returns
302
- -------
303
- pyNSID.NSIDataset
304
- NSIDataset version of h5_main now that it is a NSID Main dataset
305
- """
306
- if not isinstance(h5_main, h5py.Dataset):
307
- raise TypeError('h5_main should be a h5py.Dataset object')
308
-
309
- h5_parent_group = h5_main.parent
310
- main_shape = h5_main.shape
311
- ######################
312
- # Validate Dimensions
313
- ######################
314
- # An N dimensional dataset should have N items in the dimension dictionary
315
- if len(dim_dict) != len(main_shape):
316
- raise ValueError('Incorrect number of dimensions: {} provided to support main data, of shape: {}'
317
- .format(len(dim_dict), main_shape))
318
- if set(range(len(main_shape))) != set(dim_dict.keys()):
319
- raise KeyError('')
320
-
321
- dim_names = []
322
- for index, dim_exp_size in enumerate(main_shape):
323
- this_dim = dim_dict[index]
324
- if isinstance(this_dim, h5py.Dataset):
325
- error_message = validate_dimensions(this_dim, main_shape[index])
326
- if len(error_message) > 0:
327
- raise TypeError('Dimension {} has the following error_message:\n'.format(index), error_message)
328
- else:
329
- # if this_dim.name not in dim_names:
330
- if this_dim.name not in dim_names: # names must be unique
331
- dim_names.append(this_dim.name)
332
- else:
333
- raise TypeError('All dimension names must be unique, found {} twice'.format(this_dim.name))
334
- if this_dim.file != h5_parent_group.file:
335
- copy_dataset(this_dim, h5_parent_group, verbose=False)
336
- else:
337
- raise TypeError('Items in dictionary must all be h5py.Datasets !')
338
-
339
- ################
340
- # Attach Scales
341
- ################
342
- for i, this_dim_dset in dim_dict.items():
343
- this_dim_dset.make_scale(this_dim_dset.attrs['name'])
344
- h5_main.dims[int(i)].label = this_dim_dset.attrs['name']
345
- h5_main.dims[int(i)].attach_scale(this_dim_dset)
346
-
347
- return h5_main
348
-
349
-
350
- def get_source_dataset(h5_group):
351
- """
352
- Find the name of the source dataset used to create the input `h5_group`,
353
- so long as the source dataset is in the same HDF5 file
354
- Parameters
355
- ----------
356
- h5_group : :class:`h5py.Group`
357
- Child group whose source dataset will be returned
358
- Returns
359
- -------
360
- h5_source : NSIDataset object
361
- Main dataset from which this group was generated
362
- """
363
- if not isinstance(h5_group, h5py.Group):
364
- raise TypeError('h5_group should be a h5py.Group object')
365
-
366
- h5_parent_group = h5_group.parent
367
- group_name = h5_group.name.split('/')[-1]
368
- # What if the group name was not formatted according to Pycroscopy rules?
369
- name_split = group_name.split('-')
370
- if len(name_split) != 2:
371
- raise ValueError("The provided group's name could not be split by '-' as expected in "
372
- "SourceDataset-ProcessName_000")
373
- h5_source = h5_parent_group[name_split[0]]
374
-
375
- if not isinstance(h5_source, h5py.Dataset):
376
- raise ValueError('Source object was not a dataset!')
377
-
378
- return h5_source
379
-
380
-
381
- def validate_dimensions(this_dim, dim_shape):
382
- """
383
- Checks if the provided object is an h5 dataset.
384
- A valid dataset to be uses as dimension must be 1D not a compound data type but 'simple'.
385
- Such a dataset must have ancillary attributes 'name', quantity', 'units', and 'dimension_type',
386
- which have to be of types str, str, str, and bool respectively and not empty
387
- If it is not valid of dataset, Exceptions are raised.
388
-
389
- Parameters
390
- ----------
391
- this_dim : h5 dataset
392
- with non empty attributes 'name', quantity', 'units', and 'dimension_type'
393
- dim_shape : required length of dataset
394
-
395
- Returns
396
- -------
397
- error_message: string, empty if ok.
398
- """
399
-
400
- if not isinstance(this_dim, h5py.Dataset):
401
- error_message = 'this Dimension must be a h5 Dataset'
402
- return error_message
403
-
404
- error_message = ''
405
- # Is it 1D?
406
- if len(this_dim.shape) != 1:
407
- error_message += ' High dimensional datasets are not allowed as dimensions;\n'
408
- # Does this dataset have a "simple" dtype - no compound data type allowed!
409
- # is the shape matching with the main dataset?
410
- if len(this_dim) != dim_shape:
411
- error_message += ' Dimension has wrong length;\n'
412
- # Does it contain some ancillary attributes like 'name', quantity', 'units', and 'dimension_type'
413
- necessary_attributes = ['name', 'quantity', 'units', 'dimension_type']
414
- for key in necessary_attributes:
415
- if key not in this_dim.attrs:
416
- error_message += 'Missing {} attribute in dimension;\n '.format(key)
417
- elif not isinstance(this_dim.attrs[key], str):
418
- error_message += '{} attribute in dimension should be string;\n '.format(key)
419
-
420
- return error_message
421
-
422
-
423
- def validate_main_dimensions(main_shape, dim_dict, h5_parent_group):
424
- # Each item could either be a Dimension object or a HDF5 dataset
425
- # Collect the file within which these ancillary HDF5 objects are present if they are provided
426
- which_h5_file = {}
427
- # Also collect the names of the dimensions. We want them to be unique
428
- dim_names = []
429
-
430
- dimensions_correct = []
431
- for index, dim_exp_size in enumerate(main_shape):
432
- this_dim = dim_dict[index]
433
- if isinstance(this_dim, h5py.Dataset):
434
- error_message = validate_dimensions(this_dim, main_shape[index])
435
-
436
- # All these checks should live in a helper function for cleanliness
437
-
438
- if len(error_message) > 0:
439
- print('Dimension {} has the following error_message:\n'.format(index), error_message)
440
-
441
- else:
442
- if this_dim.name not in dim_names: # names must be unique
443
- dim_names.append(this_dim.name)
444
- else:
445
- raise TypeError('All dimension names must be unique, found'
446
- ' {} twice'.format(this_dim.name))
447
-
448
- # are all datasets in the same file?
449
- if this_dim.file != h5_parent_group.file:
450
- copy_dataset(this_dim, h5_parent_group, verbose=True)
451
-
452
- elif isinstance(this_dim, Dimension):
453
- # is the shape matching with the main dataset?
454
- dimensions_correct.append(len(this_dim.values) == dim_exp_size)
455
- # Is there a HDF5 dataset with the same name already in the provided group
456
- # where this dataset will be created?
457
- if this_dim.name in h5_parent_group:
458
- # check if this object with the same name is a dataset and if it satisfies the above tests
459
- if isinstance(h5_parent_group[this_dim.name], h5py.Dataset):
460
- print('needs more checking')
461
- dimensions_correct[-1] = False
462
- else:
463
- dimensions_correct[-1] = True
464
- # Otherwise, just append the dimension name for the uniqueness test
465
- elif this_dim.name not in dim_names:
466
- dim_names.append(this_dim.name)
467
- else:
468
- dimensions_correct[-1] = False
469
- else:
470
- raise TypeError('Values of dim_dict should either be h5py.Dataset '
471
- 'objects or Dimension. Object at index: {} was of '
472
- 'type: {}'.format(index, index))
473
-
474
- for dim in which_h5_file:
475
- if which_h5_file[dim] != h5_parent_group.file.filename:
476
- print('need to copy dimension', dim)
477
- for i, dim_name in enumerate(dim_names[:-1]):
478
- if dim_name in dim_names[i + 1:]:
479
- print(dim_name, ' is not unique')
480
-
481
- return dimensions_correct