sxs 2024.0.44-py3-none-any.whl → 2025.0.2-py3-none-any.whl

This diff shows the changes between two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
sxs/metadata/metadata.py CHANGED
@@ -1,641 +1,2 @@
  """Container for metadata of individual simulations"""
-
- import re
- import collections
- import numpy as np
-
-
- _valid_identifier_pattern = re.compile(r'\W|^(?=\d)')
-
-
- def _valid_identifier(key):
-     return _valid_identifier_pattern.sub('_', key)
-
-
- _metadata_key_map = {
-     # This should be a dictionary of `valid_identifier: metadata_key` pairs for any pair that isn't
-     # adequately covered by replacing underscores with dashes. For example, this would be a working
-     # but unnecessary pair:
-     # 'simulation_name': 'simulation-name',
- }
-
-
- def _valid_identifier_to_metadata_key(key):
-     return _metadata_key_map.get(key, key.replace('_', '-'))
-
-
- def _backwards_compatibility(metadata):
-     """Deal with the fact that keys have been removed/renamed"""
-     # See also `sxs.simulations.simulations.Simulations.dataframe`;
-     # it's probably a good idea to duplicate whatever is included here
-     # in that function, just to make sure nothing slips through the
-     # cracks.
-     if "number_of_orbits" not in metadata:
-         if "number_of_orbits_from_start" in metadata:
-             metadata["number_of_orbits"] = metadata["number_of_orbits_from_start"]
-         else:
-             metadata["number_of_orbits"] = np.nan
-     return metadata
-
-
- class Metadata(collections.OrderedDict):
-     """Interface to metadata
-
-     Note that the constructor is not generally useful from outside this class. See
-     Metadata.from_file, etc., for more useful initialization functions.
-
-     This object is essentially a `collections.OrderedDict`, with a few extra features:
-       1) Keys are always forced to be valid python identifiers, whether setting or
-          getting.
-       2) There are a few extra methods for constructing these objects from json data or
-          files or txt files, and outputting them to json or txt files.
-       3) If an attribute does not exist on the object, the keys are searched. This
-          allows for tab-completion on the key names.
-       4) Properties (methods without parentheses) that attempt to automatically
-          determine the resolution and lev number from the 'simulation-name'.
-
-     """
-
-     @classmethod
-     def from_file(cls, file_name, ignore_invalid_lines=False, cache_json=True):
-         """Read a file into a Metadata object
-
-         Parameters
-         ----------
-         file_name : str
-             The input `file_name` may either end in `.txt`, for an old-style
-             metadata.txt file, or in `.json`, for a JSON file. If neither ending is
-             present, the function searches for files with the given `file_name` plus
-             both `.txt` and `.json`, and reads the newer one or at least the existing
-             one.
-         ignore_invalid_lines : bool, optional
-             If True, invalid lines will be ignored; otherwise, an error will be raised.
-         cache_json : bool, optional
-             If True, and the `.json` file does not exist or is older than the `.txt`
-             file, a new `.json` file will be created with contents equivalent to the
-             `.txt` file.
-
-         Raises
-         ------
-         SyntaxError : on `.txt` parse errors when `ignore_invalid_lines` is False.
-
-         """
-         from pathlib import Path
-
-         path = Path(file_name).expanduser().resolve()
-         json_path = path.with_suffix(".json")
-         txt_path = path.with_suffix(".txt")
-
-         if path.suffix == ".json":
-             metadata = cls.from_json_file(json_path)
-         elif path.suffix == ".txt":
-             metadata = cls.from_txt_file(txt_path, ignore_invalid_lines=ignore_invalid_lines, cache_json=cache_json)
-         else:
-             json_exists = json_path.exists()
-             txt_exists = txt_path.exists()
-             if json_exists and not txt_exists:
-                 metadata = cls.from_json_file(json_path)
-             elif txt_exists and not json_exists:
-                 metadata = cls.from_txt_file(txt_path, ignore_invalid_lines=ignore_invalid_lines, cache_json=cache_json)
-             elif json_exists and txt_exists:
-                 json_time = json_path.stat().st_mtime
-                 txt_time = txt_path.stat().st_mtime
-                 if txt_time > json_time:
-                     metadata = cls.from_txt_file(txt_path, ignore_invalid_lines=ignore_invalid_lines, cache_json=cache_json)
-                 else:
-                     metadata = cls.from_json_file(json_path)
-             else:
-                 raise ValueError(f"Could not find file named '{json_path}' or '{txt_path}'")
-
-         return _backwards_compatibility(metadata)
-
-     load = from_file
-
-     @classmethod
-     def from_json_data(cls, json_data):
-         """Read metadata from a file-like object (not a file)
-
-         Parameters
-         ----------
-         json_data : file-like object
-             Note that this cannot be just a file name; it must be a file-like object
-             (such as an open file handle). See the `from_json_file` function if you
-             just want to pass the path to a file.
-
-         See Also
-         --------
-         sxs.Metadata.from_json_file : `.json` files
-         sxs.Metadata.from_file : reads `.txt` or `.json` files
-
-         """
-         import json
-         # noinspection PyTypeChecker
-         return _backwards_compatibility(json.load(json_data, object_pairs_hook=cls))
-
-     @classmethod
-     def from_json_file(cls, json_file):
-         """Read metadata.json file
-
-         Parameters
-         ----------
-         json_file : str
-             The path to a `metadata.json` file.
-
-         See Also
-         --------
-         sxs.Metadata.from_file : reads `.txt` or `.json` files
-
-         """
-         import json
-         from pathlib import Path
-         path = Path(json_file).expanduser().resolve().with_suffix(".json")
-         with path.open(mode="r") as metadata_file:
-             # noinspection PyTypeChecker
-             metadata = json.load(metadata_file, object_pairs_hook=cls)
-         metadata["metadata_path"] = str(json_file)
-         return _backwards_compatibility(metadata)
-
-     @classmethod
-     def from_txt_file(cls, txt_file, ignore_invalid_lines=False, cache_json=True):
-         """Read metadata.txt file
-
-         Parameters
-         ----------
-         txt_file : str
-             The path to an old-style `metadata.txt` file.
-         ignore_invalid_lines : bool, optional
-             If True, invalid lines will be ignored; otherwise, an error will be raised.
-         cache_json : bool, optional
-             If True, a new `.json` file will be created with contents equivalent to the
-             `.txt` file.
-
-         See Also
-         --------
-         sxs.Metadata.from_file : reads `.txt` or `.json` files
-
-         Notes
-         -----
-         A standard metadata.txt file is close to being an executable python script that
-         just defines a bunch of constants. The three main problems with the
-         metadata.txt format are
-
-           1) variable names contain dashes, which is the subtraction operator in python,
-           2) strings are not enclosed in quotes, and
-           3) lists are not enclosed in brackets
-
-         It is easy to correct these problems. In particular, (1) is resolved by
-         changing dashes to underscores in the identifiers. A bug in SpEC
-         metadata.txt files -- whereby some comment lines are missing the initial `#` --
-         is also fixed. There are also occasional other problems, like commas missing
-         from lists. All syntax errors as of this writing are fixed in this function.
-
-         Note that this function is not very flexible when it comes to generalizing the
-         syntax of the metadata.txt files. In particular, it assumes that the
-         right-hand sides are either numbers or strings (or lists of either numbers or
-         strings). For example, I think I've seen cases where the eccentricity is given
-         as something like "<1e-5". Since python has no "less-than" type, this is
-         converted to a string. But generally, this does seem to work on metadata.txt
-         files in the SXS waveform repository.
-
-         """
-         # This is considered the safe way to evaluate strings "containing Python values
-         # from untrusted sources without the need to parse the values oneself". We
-         # will use it to parse the right-hand side expressions from the metadata.txt
-         # file, once we've appropriately modified them, so that python will get to
-         # decide what should be a string, float, int, list, etc.
-         import warnings
-         from ast import literal_eval
-
-         from pathlib import Path
-         path = Path(txt_file).expanduser().resolve().with_suffix(".txt")
-
-         assignment_pattern = re.compile(r"""([-A-Za-z0-9]+)\s*=\s*(.*)""")
-         string_pattern = re.compile(r"""[A-DF-Za-df-z<>@]""")  # Ignore "e" and "E" because they may appear in numbers
-         multi_space_pattern = re.compile(r"""\s+""")
-         metadata = cls()
-
-         with path.open("r") as metadata_file:
-             for line in metadata_file:
-                 # Work around bug where some lines of dashes are missing the comment character
-                 if line.startswith("-"):
-                     continue
-
-                 # Process each assignment line, skipping comments and unrecognized lines
-                 match = assignment_pattern.match(line)
-                 if match:
-                     variable, quantity = match.groups()
-
-                     # It was a stupid choice to make variables contain dashes
-                     variable = _valid_identifier(variable)
-
-                     # If `quantity` is an empty string, we should just replace it with an empty list
-                     if not quantity or quantity == "\n":
-                         quantity = "[]"
-                     else:
-                         q = quantity.strip()
-                         if "[unknown]" in q.lower():
-                             metadata[variable] = "NaN"
-                             continue
-                         elif (q.startswith('"') and q.endswith('"')) or (q.startswith("'") and q.endswith("'")):
-                             # If the whole thing is quoted, just leave it as is
-                             quantity = q
-                         elif string_pattern.search(quantity):
-                             # If this is a string, strip whitespace from it, split lists and place
-                             # brackets around them, and place quotation marks around each element
-                             quantities = [q.strip() for q in quantity.split(",")]
-                             if "," in quantity:
-                                 quantity = "['" + "', '".join(quantities) + "']"
-                             else:
-                                 quantity = "'" + quantities[0] + "'"
-                         else:
-                             # Otherwise, just place brackets around lists of non-strings
-                             quantity = quantity.strip()
-                             if "," in quantity:
-                                 quantity = "[" + quantity + "]"
-                             elif " " in quantity:
-                                 quantity = "[" + multi_space_pattern.sub(',', quantity) + "]"
-
-                     # Add this line to the metadata, whether or not it's been modified
-                     try:
-                         metadata[variable] = literal_eval(quantity)
-                     except SyntaxError as e:
-                         message = ("\nWhile parsing {0}, transformed input text:\n".format(txt_file)
-                                    + " " + line.rstrip()
-                                    + "\ninto quantity\n"
-                                    + " " + variable + " = " + quantity
-                                    + "\nParsing this using `ast.literal_eval` resulted in a SyntaxError.\n")
-                         if cache_json and ignore_invalid_lines:
-                             cache_json = False
-                             message += "JSON caching will be turned off for this file until the error is fixed.\n"
-                         warnings.warn(message)
-                         if not ignore_invalid_lines:
-                             raise e
-
-         if cache_json:
-             # Skip the text processing next time, and just go straight to json
-             metadata.to_json_file(path.with_suffix(".json"))
-
-         metadata["metadata_path"] = str(txt_file)
-         return _backwards_compatibility(metadata)
-
-     def to_json(self, indent=4, separators=(",", ": ")):
-         """Export to JSON string"""
-         import json
-         return json.dumps(self, indent=indent, separators=separators)
-
-     def to_json_file(self, json_file, indent=4, separators=(",", ": ")):
-         """Write to JSON file"""
-         from pathlib import Path
-         path = Path(json_file).expanduser().resolve().with_suffix(".json")
-         path.parent.mkdir(parents=True, exist_ok=True)
-         with path.open("w") as f:
-             f.write(self.to_json(indent=indent, separators=separators))
-
-     save = to_json_file
-
-     def to_txt(self):
-         """Export to string like metadata.txt contents"""
-         def deformat(value):
-             """Basically undo the nice formatting of `from_txt_file`"""
-             if isinstance(value, list):
-                 return ",".join(["{0}".format(item) for item in value])
-             else:
-                 return f"{value}"
-         return "\n".join([f"{_valid_identifier_to_metadata_key(key)} = {deformat(self[key])}" for key in self])
-
-     def to_txt_file(self, txt_file):
-         """Write to file in metadata.txt format"""
-         from pathlib import Path
-         path = Path(txt_file).expanduser().resolve().with_suffix(".txt")
-         path.mkdir(parents=True, exist_ok=True)
-         with path.open("w") as f:
-             f.write(self.to_txt() + "\n")
-
-     def __init__(self, *args, **kwargs):
-         """Initialize the OrderedDict, converting all keys to valid identifiers
-
-         Note that the constructor is not generally useful from outside this class. See
-         Metadata.from_file, etc., for more useful initialization functions.
-
-         This function intercepts the allowed args and kwargs and converts any keys
-         before simply calling the base class's initialization function.
-
-         """
-         import collections
-         if len(args) > 0:
-             args = list(args)
-             if isinstance(args[0], collections.abc.Mapping):
-                 mapping = args[0]
-                 args[0] = collections.OrderedDict([(_valid_identifier(key), mapping[key]) for key in mapping])
-             else:
-                 iterable = args[0]
-                 args[0] = [(_valid_identifier(k), v) for k, v in iterable]
-         if len(kwargs) > 0:
-             for key in list(kwargs):
-                 kwargs[_valid_identifier(key)] = kwargs.pop(key)
-         super(Metadata, self).__init__(*args, **kwargs)
-
-     def add_com_parameters(self, first_only=True, raise_on_errors=False):
-         """Add any translation and boost parameters found in all CoM files in this directory
-
-         Adds a new key `com_parameters` to the top level of the metadata dictionary
-         containing the `space_translation` and `boost_velocity` parameters for the COM
-         correction.
-
-         Parameters
-         ----------
-         first_only : bool, optional
-             If True, add the first set of parameters directly under the top-level key
-             `com_parameters`; otherwise, add separate entries under that key for each
-             file ending in `_CoM.h5`.
-         raise_on_errors : bool, optional
-             If False, suppress any exceptions that happen in the core loop of this
-             function; otherwise, raise.
-
-         """
-         import os.path
-         import glob
-         import h5py
-         import re
-         import ast
-
-         path = os.path.dirname(self.get("metadata_path", "."))
-         com_parameters = self.get("com_parameters", {})
-         for file_name in reversed(sorted(glob.glob(os.path.join(path, "*_CoM.h5")))):
-             try:
-                 with h5py.File(file_name, "r") as f:
-                     for g in f:
-                         g_parameters = {}
-                         if hasattr(f[g], "attrs") and "space_translation" in f[g].attrs:
-                             g_parameters["space_translation"] = list(f[g].attrs["space_translation"])
-                         if hasattr(f[g], "attrs") and "boost_velocity" in f[g].attrs:
-                             g_parameters["boost_velocity"] = list(f[g].attrs["boost_velocity"])
-                         if "History.txt" in f[g]:
-                             history = f[g]["History.txt"][()]
-                             if hasattr(history, "decode"):
-                                 history = history.decode()
-                             for parameter_name in ["boost_velocity", "space_translation"]:
-                                 if parameter_name not in g_parameters:
-                                     pattern = rf'"{parameter_name}": array\((.*?)\)'
-                                     matches = re.search(pattern, history)
-                                     if matches:
-                                         g_parameters[parameter_name] = ast.literal_eval(matches.group(1))
-                         if first_only and "space_translation" in g_parameters and "boost_velocity" in g_parameters:
-                             self["com_parameters"] = g_parameters
-                             return self
-                         if g_parameters:
-                             com_parameters["{0}/{1}".format(os.path.basename(file_name), g)] = g_parameters
-             except Exception:
-                 if raise_on_errors:
-                     raise
-         if com_parameters:
-             self["com_parameters"] = com_parameters
-         return self
-
-     def add_standard_parameters(self, raise_on_errors=False):
-         """Add standard parameters that aren't included in the default metadata fields
-
-         New parameters include 'object_types', 'initial_mass_ratio', and
-         'reference_mass_ratio'. If 'reference_dimensionless_spin*' are not present,
-         but the parameters necessary to compute them are, they are also added.
-         Finally, we also add 'reference_chi_eff', 'reference_chi1_perp', and
-         'reference_chi2_perp'.
-
-         """
-         import math
-         import numpy as np
-
-         def stringify_nan(number):
-             if math.isnan(number):
-                 return "NaN"
-             else:
-                 return number
-
-         def stringify_nans(array):
-             return [stringify_nan(number) for number in array]
-
-         if "object1" in self and "object2" in self:
-             self["object_types"] = "".join(sorted([self["object1"].upper(), self["object2"].upper()]))
-         if "initial_mass1" in self and "initial_mass2" in self:
-             try:
-                 mass_ratio = float(self["initial_mass1"]) / float(self["initial_mass2"])
-                 self["initial_mass_ratio"] = stringify_nan(mass_ratio)
-             except Exception:
-                 if raise_on_errors:
-                     raise
-         if "reference_mass1" in self and "reference_mass2" in self:
-             try:
-                 mass_ratio = float(self["reference_mass1"]) / float(self["reference_mass2"])
-                 self["reference_mass_ratio"] = stringify_nan(mass_ratio)
-             except Exception:
-                 if raise_on_errors:
-                     raise
-         if "reference_dimensionless_spin1" not in self:
-             if "reference_spin1" in self and "reference_mass1" in self:
-                 try:
-                     self["reference_dimensionless_spin1"] = stringify_nans(
-                         np.array(self["reference_spin1"]) / self["reference_mass1"]**2
-                     )
-                 except Exception:
-                     if raise_on_errors:
-                         raise
-         if "reference_dimensionless_spin2" not in self:
-             if "reference_spin2" in self and "reference_mass2" in self:
-                 try:
-                     self["reference_dimensionless_spin2"] = stringify_nans(
-                         np.array(self["reference_spin2"]) / self["reference_mass2"]**2
-                     )
-                 except Exception:
-                     if raise_on_errors:
-                         raise
-         if "initial_dimensionless_spin1" not in self:
-             if "initial_spin1" in self and "initial_mass1" in self:
-                 try:
-                     self["initial_dimensionless_spin1"] = stringify_nans(
-                         np.array(self["initial_spin1"]) / self["initial_mass1"]**2
-                     )
-                 except Exception:
-                     if raise_on_errors:
-                         raise
-         if "initial_dimensionless_spin2" not in self:
-             if "initial_spin2" in self and "initial_mass2" in self:
-                 try:
-                     self["initial_dimensionless_spin2"] = stringify_nans(
-                         np.array(self["initial_spin2"]) / self["initial_mass2"]**2
-                     )
-                 except Exception:
-                     if raise_on_errors:
-                         raise
-         if ("reference_mass1" in self and "reference_mass2" in self and "reference_orbital_frequency" in self
-                 and "reference_dimensionless_spin1" in self and "reference_dimensionless_spin2" in self):
-             try:
-                 m1 = float(self["reference_mass1"])
-                 m2 = float(self["reference_mass2"])
-                 chi1 = np.array(self["reference_dimensionless_spin1"], dtype=float)
-                 chi2 = np.array(self["reference_dimensionless_spin2"], dtype=float)
-                 L = np.array(self["reference_orbital_frequency"], dtype=float)
-                 L /= np.linalg.norm(L)
-                 chi1L = np.dot(chi1, L)
-                 chi2L = np.dot(chi2, L)
-                 chi1perp = np.cross(chi1, L)
-                 chi2perp = np.cross(chi2, L)
-                 self["reference_chi_eff"] = stringify_nan((m1*chi1L+m2*chi2L)/(m1+m2))
-                 self["reference_chi1_perp"] = stringify_nan(np.linalg.norm(chi1perp))
-                 self["reference_chi2_perp"] = stringify_nan(np.linalg.norm(chi2perp))
-             except Exception:
-                 if raise_on_errors:
-                     raise
-         return self
-
-     def add_extras(self, raise_on_errors=False):
-         """Add information to the metadata from other files in its directory"""
-         self.add_com_parameters(raise_on_errors=raise_on_errors)
-         self.add_standard_parameters(raise_on_errors=raise_on_errors)
-         return self
-
-     def reorder_keys(self, order=None):
-         """Return a copy of this object with keys reordered
-
-         It is sometimes nice to reorder the keys of the metadata to display the most
-         interesting quantities first. The usual order output by SpEC, for example,
-         hides crucial quantities like the masses and spins after lots of uninteresting
-         keys like the author list and various bibtex groups. This function allows the
-         keys to be reordered using exact matches and regular expressions.
-
-         """
-         import re
-         if order is None:
-             order = [
-                 "url",
-                 "simulation_name",
-                 "alternative_names",
-                 "initial_data_type",
-                 "eos",
-                 "object_types",
-                 "number_of_orbits",
-                 "reference_mass_ratio",
-                 "reference_chi_eff",
-                 "reference_chi1_perp",
-                 "reference_chi2_perp",
-                 "reference_eccentricity",
-                 "reference_dimensionless_spin1",
-                 "reference_dimensionless_spin2",
-                 "reference_orbital_frequency",
-                 "reference_mass1",
-                 "reference_mass2",
-                 "reference.*",
-             ]
-         original_keys = list(self)
-         new = type(self)()
-         for ordered_key in order:
-             if ordered_key in original_keys:
-                 new[ordered_key] = self[ordered_key]
-                 original_keys.remove(ordered_key)
-             else:
-                 key_pattern = re.compile(ordered_key)
-                 for key in list(original_keys):  # Iterate over a *copy* of the original_keys list
-                     if key_pattern.match(key):
-                         new[key] = self[key]
-                         original_keys.remove(key)
-         for key in original_keys:
-             new[key] = self[key]
-         return new
-
-     # @classmethod
-     # def fromkeys(cls, iterable):
-     #     iterable = [(_valid_identifier(k), v) for k, v in iterable]
-     #     return super(Metadata, cls).fromkeys(iterable)
-
-     @property
-     def resolution(self):
-         """Try to determine the resolution from the "simulation-name" field"""
-         import os
-         simulation_name = self["simulation_name"]
-         last_slash_index = simulation_name.rindex(os.sep)
-         return simulation_name[last_slash_index+1:]
-
-     @property
-     def lev(self):
-         """Try to determine an integer "Lev" number from the "simulation-name" field"""
-         resolution = self.resolution
-         return int(resolution.replace("Lev", ""))
-
-     @property
-     def simulation_group(self):
-         """Remove any trailing "/LevN" part of the simulation-name"""
-         import os
-         simulation_name = self["simulation_name"]
-         last_slash_index = simulation_name.rindex(os.sep + "Lev")
-         if last_slash_index < 1:
-             last_slash_index = len(simulation_name)
-         return simulation_name[:last_slash_index]
-
-     ###############################################################################
-     ###############################################################################
-     #                                                                             #
-     # The following methods mirror equivalent methods in OrderedDict, but also    #
-     # ensure that any keys are converted to valid identifiers first. This         #
-     # enables this object to be used more like the original metadata.txt format.  #
-     #                                                                             #
-     ###############################################################################
-     ###############################################################################
-
-     def __contains__(self, key):
-         return super(Metadata, self).__contains__(_valid_identifier(key))
-
-     def __delattr__(self, name):
-         super(Metadata, self).__delattr__(_valid_identifier(name))
-
-     def __delitem__(self, key):
-         super(Metadata, self).__delitem__(_valid_identifier(key))
-
-     def __getattribute__(self, name):
-         """Include keys as attributes
-
-         This allows retrieval of a key like `md["simulation_name"]` as `md.simulation_name`.
-
-         """
-         try:
-             return super(Metadata, self).__getattribute__(name)
-         except AttributeError as e:
-             try:
-                 return self[_valid_identifier(name)]
-             except KeyError:
-                 raise e
-
-     def __dir__(self):
-         """Ensure that the keys are included in tab completion"""
-         return list(sorted(set(super(Metadata, self).__dir__()))) + list(self.keys())
-
-     def __getitem__(self, key):
-         return super(Metadata, self).__getitem__(_valid_identifier(key))
-
-     def __setattr__(self, name, value):
-         name = _valid_identifier(name)
-         super(Metadata, self).__setattr__(name, value)
-
-     def __setitem__(self, key, value):
-         super(Metadata, self).__setitem__(_valid_identifier(key), value)
-
-     def get(self, key, default=None):
-         return super(Metadata, self).get(_valid_identifier(key), default)
-
-     def pop(self, key, default=None):
-         return super(Metadata, self).pop(_valid_identifier(key), default)
-
-     def setdefault(self, key, default=None):
-         return super(Metadata, self).setdefault(_valid_identifier(key), default)
-
-     def update(self, mapping_or_iterable=None, /, **kwargs):
-         # This should be just the same as the collections.OrderedDict.update
-         # method, except that identifiers passed in via mapping_or_iterable
-         # have to be converted to valid identifiers first.
-         if mapping_or_iterable is not None:
-             if hasattr(mapping_or_iterable, "keys") and callable(mapping_or_iterable.keys):
-                 for k in mapping_or_iterable:
-                     self[_valid_identifier(k)] = mapping_or_iterable[k]
-             else:
-                 for k, v in mapping_or_iterable:
-                     self[_valid_identifier(k)] = v
-         for k in kwargs:
-             self[k] = kwargs[k]
+ from sxscatalog.metadata.metadata import *
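
The net effect of this change is that `sxs/metadata/metadata.py` no longer defines the `Metadata` class itself; it re-exports everything from the `sxscatalog` package via the wildcard import. A minimal sketch of what downstream usage presumably still looks like, assuming `sxscatalog` is installed as a dependency of `sxs` and continues to export the same `Metadata` class and constructors (the file path below is purely hypothetical):

    # Sketch only: the wildcard import means this module's public API is now
    # whatever sxscatalog.metadata.metadata exports; Metadata is assumed among it.
    from sxs.metadata.metadata import Metadata

    # Hypothetical path; per the removed docstrings, from_file accepts either
    # a metadata.json or an old-style metadata.txt file.
    md = Metadata.from_file("path/to/metadata.json")

    # Keys were accessible as attributes (dashes mapped to underscores);
    # this is assumed to carry over to the sxscatalog implementation.
    print(md.simulation_name)

A re-export of this kind preserves existing import paths for downstream code while moving the maintained implementation upstream to `sxscatalog`; the trade-off is that the module's behavior is no longer visible in the `sxs` source itself.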