pymetadata 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pymetadata might be problematic; see the registry's advisory page for more details.

Files changed (42) hide show
  1. pymetadata/__init__.py +14 -0
  2. pymetadata/cache.py +52 -0
  3. pymetadata/chebi.py +92 -0
  4. pymetadata/console.py +18 -0
  5. pymetadata/core/__init__.py +1 -0
  6. pymetadata/core/annotation.py +396 -0
  7. pymetadata/core/creator.py +46 -0
  8. pymetadata/core/synonym.py +12 -0
  9. pymetadata/core/xref.py +66 -0
  10. pymetadata/examples/__init__.py +1 -0
  11. pymetadata/examples/cache_path_example.py +15 -0
  12. pymetadata/examples/omex_example.py +46 -0
  13. pymetadata/examples/results/test_from_files.omex +0 -0
  14. pymetadata/examples/results/test_from_omex.omex +0 -0
  15. pymetadata/examples/results/testomex/README.md +3 -0
  16. pymetadata/examples/results/testomex/manifest.xml +9 -0
  17. pymetadata/examples/results/testomex/models/omex_comp.xml +174 -0
  18. pymetadata/examples/results/testomex/models/omex_comp_flat.xml +215 -0
  19. pymetadata/examples/results/testomex/models/omex_minimal.xml +99 -0
  20. pymetadata/examples/test.omex +0 -0
  21. pymetadata/identifiers/__init__.py +1 -0
  22. pymetadata/identifiers/miriam.py +43 -0
  23. pymetadata/identifiers/registry.py +397 -0
  24. pymetadata/log.py +29 -0
  25. pymetadata/metadata/__init__.py +6 -0
  26. pymetadata/metadata/eco.py +15918 -0
  27. pymetadata/metadata/kisao.py +2731 -0
  28. pymetadata/metadata/sbo.py +3754 -0
  29. pymetadata/omex.py +771 -0
  30. pymetadata/omex_v2.py +30 -0
  31. pymetadata/ontologies/__init__.py +1 -0
  32. pymetadata/ontologies/ols.py +214 -0
  33. pymetadata/ontologies/ontology.py +312 -0
  34. pymetadata/py.typed +0 -0
  35. pymetadata/resources/chebi_webservice_wsdl.xml +509 -0
  36. pymetadata/resources/ontologies/README.md +4 -0
  37. pymetadata/resources/templates/ontology_enum.pytemplate +61 -0
  38. pymetadata/unichem.py +190 -0
  39. pymetadata-0.5.0.dist-info/METADATA +154 -0
  40. pymetadata-0.5.0.dist-info/RECORD +42 -0
  41. pymetadata-0.5.0.dist-info/WHEEL +4 -0
  42. pymetadata-0.5.0.dist-info/licenses/LICENSE +7 -0
pymetadata/__init__.py ADDED
@@ -0,0 +1,14 @@
1
"""pymetadata - Python utilities for metadata."""

from pathlib import Path

__author__ = "Matthias Koenig"
__version__ = "0.5.0"


program_name: str = "pymetadata"
# Bundled data files shipped with the package (e.g. the ChEBI WSDL, templates).
RESOURCES_DIR: Path = Path(__file__).parent / "resources"
# Directory containing the generated ontology enum modules (eco, kisao, sbo).
ENUM_DIR: Path = Path(__file__).parent / "metadata"

# Package-wide cache defaults; query helpers fall back to these when no
# explicit cache settings are passed (see e.g. ChebiQuery.query).
CACHE_USE: bool = False
CACHE_PATH: Path = Path.home() / ".cache" / "pymetadata"
pymetadata/cache.py ADDED
@@ -0,0 +1,52 @@
1
+ """Caching of information."""
2
+
3
+ import json
4
+ from json.encoder import JSONEncoder
5
+ from pathlib import Path
6
+ from typing import Any, Dict, Optional, Type
7
+
8
+ from pymetadata import log
9
+
10
+
11
+ logger = log.get_logger(__name__)
12
+
13
+
14
class DataclassJSONEncoder(JSONEncoder):
    """JSON serialization of dataclasses.

    Falls back to the instance attribute dictionary for objects the
    default encoder cannot handle (e.g. dataclass instances).
    """

    def default(self, o: Any) -> Any:
        """Serialize unknown objects via their attribute dictionary."""
        return vars(o)
20
+
21
+
22
def read_json_cache(cache_path: Path) -> Dict:
    """Read JSON cache file.

    :param cache_path: path of the JSON cache file to read
    :return: dictionary with the cached content
    :raises IOError: if the cache file does not exist
    """
    # NOTE: the previous docstring claimed `None` is returned for a missing
    # file, but the function raises IOError; callers (e.g. ChebiQuery.query)
    # rely on catching IOError, so the raise is the documented contract now.
    if cache_path.exists():
        with open(cache_path, encoding="utf-8") as fp:
            logger.debug(f"Read cache: {cache_path}")
            return json.load(fp)  # type: ignore

    raise IOError(f"Cache path does not exist: '{cache_path}'")
34
+
35
+
36
def write_json_cache(
    data: Dict, cache_path: Path, json_encoder: Optional[Type[JSONEncoder]] = None
) -> None:
    """Write JSON cache file.

    Parent directories are created if necessary.

    :param data: data to serialize
    :param cache_path: path for the cache file
    :param json_encoder: optional JSON encoder class for non-standard objects
    :return: None
    """
    cache_path.parent.mkdir(parents=True, exist_ok=True)
    with open(cache_path, "w", encoding="utf-8") as fp:
        logger.info(f"Write cache: {cache_path}")
        # json.dump accepts cls=None (uses the default encoder), so the
        # previous if/else duplication of the dump call is unnecessary.
        json.dump(data, fp=fp, indent=2, cls=json_encoder)
pymetadata/chebi.py ADDED
@@ -0,0 +1,92 @@
1
+ """Module for working with chebi."""
2
+
3
+ from pathlib import Path
4
+ from pprint import pprint
5
+ from typing import Any, Dict, Optional
6
+
7
+ from zeep import Client
8
+
9
+
10
+ import pymetadata
11
+ from pymetadata import log
12
+ from pymetadata.cache import DataclassJSONEncoder, read_json_cache, write_json_cache
13
+
14
+ logger = log.get_logger(__name__)
15
+
16
+ # FIXME: copy the file to the cache dir
17
+ client = Client(str(pymetadata.RESOURCES_DIR / "chebi_webservice_wsdl.xml"))
18
+
19
+
20
class ChebiQuery:
    """Class to query information from ChEBI.

    An overview over available methods:
    python -mzeep https://www.ebi.ac.uk/webservices/chebi/2.0/webservice?wsdl
    """

    @staticmethod
    def query(
        chebi: str, cache: Optional[bool] = None, cache_path: Optional[Path] = None
    ) -> Dict:
        """Query additional ChEBI information."""

        if not chebi:
            return dict()

        # fall back to package-level cache settings
        cache = pymetadata.CACHE_USE if cache is None else cache
        cache_path = pymetadata.CACHE_PATH if cache_path is None else cache_path

        # per-term cache file location ("CHEBI:x" -> "CHEBI%3Ax.json")
        base_dir = Path(cache_path) / "chebi"
        if not base_dir.exists():
            base_dir.mkdir(parents=True)
        chebi_path = base_dir / f"{chebi.replace(':', '%3A')}.json"

        data: Dict[str, Any] = {}
        if cache:
            try:
                data = read_json_cache(cache_path=chebi_path)
            except IOError:
                # cache miss: fall through to the web service query
                pass

        if not data:
            # fetch from the ChEBI SOAP web service and cache the result
            try:
                result = client.service.getCompleteEntity(chebi)
            except Exception:
                logger.error(f"CHEBI information could not be retrieved for: {chebi}")
                return dict()

            # first formula entry (if any)
            formula = None
            formulae = result["Formulae"]
            if formulae:
                formula = formulae[0]["data"]

            data = {
                "chebi": chebi,
                "name": result["chebiAsciiName"],
                "definition": result["definition"],
                "formula": formula,
                "charge": result["charge"],
                "mass": result["mass"],
                "inchikey": result["inchiKey"],
            }

            logger.info(f"Write chebi: {chebi_path}")
            write_json_cache(
                data=data, cache_path=chebi_path, json_encoder=DataclassJSONEncoder
            )

        return data
84
+
85
+
86
if __name__ == "__main__":
    # Exercise each query twice: once bypassing the cache, once using it.
    for chebi_id in ["CHEBI:2668", "CHEBI:138366", "CHEBI:9637", "CHEBI:155897"]:
        print(chebi_id)
        info = ChebiQuery.query(chebi=chebi_id, cache=False)
        pprint(info)
        info = ChebiQuery.query(chebi=chebi_id, cache=True)
pymetadata/console.py ADDED
@@ -0,0 +1,18 @@
1
"""Rich console for logging."""

from rich import pretty
from rich.console import Console
from rich.theme import Theme


# Enable rich pretty-printing of Python objects in the REPL.
pretty.install()

# Severity-name -> color mapping shared by package console output.
custom_theme = Theme(
    {
        "success": "green",
        "info": "blue",
        "warning": "orange3",
        "error": "red",
    }
)

# Single shared console instance; `record=True` keeps a transcript so output
# can be exported later (presumably as text/HTML — TODO confirm usage).
console = Console(record=True, theme=custom_theme)
@@ -0,0 +1 @@
1
+ """Core data structures."""
@@ -0,0 +1,396 @@
1
+ """Annotation.
2
+
3
+ Core data structure to store annotations.
4
+ """
5
+
6
+ import re
7
+ import urllib
8
+ from pprint import pprint
9
+ from typing import Any, Dict, Final, List, Optional, Tuple, Union
10
+
11
+ import requests
12
+
13
+ from pymetadata import log
14
+ from pymetadata.core.xref import CrossReference, is_url
15
+ from pymetadata.identifiers.miriam import BQB, BQM
16
+ from pymetadata.identifiers.registry import REGISTRY
17
+ from pymetadata.ontologies.ols import ONTOLOGIES, OLSQuery
18
+
19
+
20
# Shared OLS query helper used to resolve terms of the known ontologies.
OLS_QUERY = OLSQuery(ontologies=ONTOLOGIES)

IDENTIFIERS_ORG_PREFIX: Final = "http://identifiers.org"
# Classic form: http(s)://identifiers.org/<collection>/<term>
IDENTIFIERS_ORG_PATTERN1: Final = re.compile(r"^https?://identifiers.org/(.+?)/(.+)")
# New short form: http(s)://identifiers.org/<COLLECTION>:<id> (single segment)
IDENTIFIERS_ORG_PATTERN2: Final = re.compile(r"^https?://identifiers.org/(.+)")
# Deprecated MIRIAM URN form: urn:miriam:<collection>:<id>
MIRIAM_URN_PATTERN: Final = re.compile(r"^urn:miriam:(.+)")

logger = log.get_logger(__name__)
28
+
29
+
30
class RDFAnnotation:
    """RDFAnnotation class.

    Basic storage of annotation information. This consists of the relation
    (MIRIAM qualifier) and the resource.
    The annotations can be attached to other objects thereby forming
    triples which can be converted to RDF.

    Resource can be either:
    - `http(s)://identifiers.org/collection/term`, i.e., a identifiers.org URI
    - `collection/term`, i.e., the combination of collection and term
    - `http(s)://arbitrary.url`, an arbitrary URL
    - urn:miriam:uniprot:P03023
    """

    # Legacy MIRIAM collection ids mapped to their current names.
    replaced_collections: Dict[str, str] = {
        "obo.go": "go",
        "biomodels.sbo": "sbo",
    }

    def __init__(self, qualifier: Union[BQB, BQM], resource: str):
        """Initialize RDFAnnotation.

        Parses `resource` into an optional `collection` and a `term`.
        Arbitrary URLs that are not identifiers.org URIs are stored as the
        term with `collection=None`.

        :param qualifier: MIRIAM qualifier (BQB or BQM) for the relation
        :param resource: resource string (see class docstring for forms)
        :raises ValueError: if qualifier or resource is missing/invalid
        """
        self.qualifier: Union[BQB, BQM] = qualifier
        self.collection: Optional[str] = None
        self.term: Optional[str] = None
        self.resource: str = resource

        if not qualifier:
            raise ValueError(
                f"MIRIAM qualifiers are required for rdf annotation, but no "
                f"qualifier for resource '{resource}' was provided."
            )
        if not resource:
            raise ValueError(
                f"resource is required for annotation, but resource is emtpy "
                f"'{qualifier} {resource}'."
            )
        if not isinstance(resource, str):
            raise ValueError(
                f"resource must be string, but found '{resource} {type(resource)}'."
            )

        # handle urls
        if resource.startswith("http"):
            match1 = IDENTIFIERS_ORG_PATTERN1.match(resource)
            if match1:
                # handle identifiers.org pattern
                self.collection, self.term = match1.group(1), match1.group(2)

            if not self.collection:
                # tests new short pattern: single `COLLECTION:id` segment
                match2 = IDENTIFIERS_ORG_PATTERN2.match(resource)
                if match2:
                    tokens = match2.group(1).split(":")
                    if len(tokens) == 2:
                        self.collection = tokens[0].lower()
                        # term keeps the embedded prefix, e.g. `GO:0005829`
                        self.term = match2.group(1)
                    else:
                        logger.warning(
                            f"Identifiers.org URL does not conform to new"
                            f"short pattern: {resource}"
                        )

            if not self.collection:
                # other urls are directly stored as resources without collection
                self.collection = None
                self.term = resource
                logger.debug(
                    f"{resource} does not conform to "
                    f"http(s)://identifiers.org/collection/id or http(s)://identifiers.org/id",
                )
        elif resource.startswith("urn:miriam:"):
            # deprecated MIRIAM URN form, e.g. urn:miriam:uniprot:P03023
            match3 = MIRIAM_URN_PATTERN.match(resource)
            if match3:
                tokens = match3.group(1).split(":")
                self.collection = tokens[0]
                # remaining tokens form the term; `%3A` is a url-encoded ':'
                self.term = ":".join(tokens[1:]).replace("%3A", ":")

            logger.warning(
                f"Deprecated urn pattern `{resource}` updated: "
                f"{self.resource_normalized}"
            )

        else:
            # handle short notation: `collection/term` or bare `PREFIX:id`
            tokens = resource.split("/")
            if len(tokens) == 2:
                self.collection = tokens[0]
                self.term = "/".join(tokens[1:])
            elif len(tokens) == 1 and ":" in tokens[0]:
                # e.g. `CHEBI:456215` -> collection `chebi`, term `CHEBI:456215`
                self.collection = tokens[0].split(":")[0].lower()
                self.term = tokens[0]

            if len(tokens) < 2 and not self.collection:
                logger.error(
                    f"Resource `{resource}` could not be split in collection and term. "
                    f"A given resource must be of the form "
                    f"`collection/term` or an url starting with "
                    f"`http(s)://`)"
                )
                self.collection = None
                self.term = resource

        # clean legacy collections
        if self.collection in self.replaced_collections:
            self.collection = self.replaced_collections[self.collection]

        self.validate()

    @staticmethod
    def from_tuple(t: Tuple[Union[BQB, BQM], str]) -> "RDFAnnotation":
        """Construct RDFAnnotation from a (qualifier, resource) tuple."""
        qualifier, resource = t[0], t[1]
        return RDFAnnotation(qualifier=qualifier, resource=resource)

    @property
    def resource_normalized(self) -> Optional[str]:
        """Normalize resource for given annotation.

        Returns the canonical identifiers.org URI when a collection is
        known, otherwise the raw term; `None` if no term is set.
        """
        if not self.term:
            return None

        if self.collection is not None:
            if self.term.startswith(f"{self.collection.upper()}:"):
                # term already embeds the collection prefix (e.g. `CHEBI:x`)
                return f"{IDENTIFIERS_ORG_PREFIX}/{self.term}"
            else:
                return f"{IDENTIFIERS_ORG_PREFIX}/{self.collection}/{self.term}"
        else:
            return self.term

    def __repr__(self) -> str:
        """Get representation string."""
        return f"RDFAnnotation({self.qualifier}|{self.collection}|{self.term})"

    def to_dict(self) -> Dict:
        """Convert to dict."""
        return {
            "qualifier": self.qualifier.value,  # FIXME use enums!
            "collection": self.collection,
            "term": self.term,
        }

    @staticmethod
    def check_term(collection: str, term: str) -> bool:
        """Check that term follows id pattern for collection.

        Uses the Identifiers collection information.

        :return: True if the term matches the collection's id pattern;
            False otherwise (failures are also logged as errors).
        """
        namespace = REGISTRY.ns_dict.get(collection, None)
        if not namespace:
            logger.error(
                f"MIRIAM collection `{collection}` does not exist for term `{term}`"
            )
            return False

        p = re.compile(namespace.pattern)
        m = p.match(term)
        if not m:
            logger.error(
                f"Term `{term}` did not match pattern "
                f"`{namespace.pattern}` for collection `{collection}`."
            )
            return False

        return True

    @staticmethod
    def check_qualifier(qualifier: Union[BQB, BQM]) -> None:
        """Check that the qualifier is an allowed qualifier.

        :param qualifier: qualifier to check
        :raises ValueError: if qualifier is not a BQB or BQM member
        """
        if not isinstance(qualifier, (BQB, BQM)):
            supported_qualifiers = [e.value for e in BQB] + [e.value for e in BQM]

            raise ValueError(
                f"qualifier `{qualifier}` is not in supported qualifiers: "
                f"`{supported_qualifiers}`"
            )

    def validate(self) -> None:
        """Validate annotation (qualifier type and term pattern)."""
        if self.qualifier:
            self.check_qualifier(self.qualifier)
        if self.collection and self.term:
            self.check_term(collection=self.collection, term=self.term)
219
+
220
+
221
class RDFAnnotationData(RDFAnnotation):
    """Annotation with resolved information.

    Queries for the resource happen here; this resolves additional
    information (label, description, synonyms, cross-references) via the
    MIRIAM registry and the Ontology Lookup Service (OLS).
    """

    def __init__(self, annotation: RDFAnnotation):
        """Initialize RDFAnnotationData.

        :param annotation: parsed annotation to resolve
        :raises ValueError: if the annotation collection is not in the registry
        """
        self.resource = annotation.resource
        self.qualifier = annotation.qualifier
        self.collection = annotation.collection
        self.term: Optional[str] = annotation.term
        self.url: Optional[str] = None
        self.description: Optional[str] = None
        self.label: Optional[str] = None
        self.synonyms: List = []
        self.xrefs: List = []
        self.warnings: List = []
        self.errors: List = []

        if self.collection:
            # register MIRIAM xrefs
            namespace = REGISTRY.ns_dict.get(self.collection, None)
            if not namespace:
                raise ValueError(
                    f"Namespace does not exist in dict for: `{self.collection}`"
                )

            namespace_embedded = namespace.namespaceEmbeddedInLui

            if not namespace.resources:
                namespace.resources = []

            for ns_resource in namespace.resources:
                # create url from the resource's url pattern
                url = ns_resource.urlPattern

                if not self.term:
                    continue

                term = self.term

                # remove embedded prefix (e.g. `CHEBI:`) before substitution
                if namespace_embedded and namespace.prefix:
                    term = term[len(namespace.prefix) + 1 :]

                # urlencode term
                term = urllib.parse.quote(term)

                # substitute the id placeholders in the url pattern
                url = url.replace("{$Id}", term)
                url = url.replace("{$id}", term)
                if namespace.prefix:
                    # FIX: `upper` must be *called*; previously the bound
                    # method (`namespace.prefix.upper` without parentheses)
                    # was embedded in the f-string, so the replacement
                    # pattern never matched any url.
                    prefix_lui = f"{namespace.prefix.upper()}:"
                    url = url.replace(prefix_lui, urllib.parse.quote(prefix_lui))

                if not self.url:
                    # set url to first resource url
                    self.url = url

                _xref = CrossReference(
                    name=ns_resource.name, accession=self.term, url=url
                )
                valid = _xref.validate() and is_url(self.url)  # type: ignore
                if valid:
                    self.xrefs.append(_xref)

        # query OLS information (also for resources without a collection)
        self.query_ols()

    def __repr__(self) -> str:
        """Get representation string."""
        return f"RDFAnnotationData({self.collection}|{self.term}|{self.label}|{self.description}|{self.synonyms}|{self.xrefs})"

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dict."""

        return {
            "resource": self.resource,
            "resource_normalized": self.resource_normalized,
            # "qualifier": self.qualifier.value,
            "collection": self.collection,
            "term": self.term,
            "label": self.label,
            "description": self.description,
            "url": self.url,
            "synonyms": self.synonyms,
            "xrefs": self.xrefs,
            "errors": self.errors,
            "warnings": self.warnings,
        }

    def query_ols(self) -> Dict:
        """Query ontology lookup service and merge results into this object.

        :return: processed OLS information dictionary
        """
        try:
            d = OLS_QUERY.query_ols(ontology=self.collection, term=self.term)
        except requests.HTTPError as err:
            # degrade gracefully: record the HTTP error instead of raising
            logger.error(err)
            d = {
                "errors": [err],
                "warnings": [],
            }

        info = OLS_QUERY.process_response(d)

        # only fill label/description gaps; existing values are kept
        if self.label is None:
            self.label = info["label"]

        if self.description is None:
            self.description = info["description"]

        # synonyms/xrefs are taken from OLS; warnings/errors accumulate
        self.synonyms = info["synonyms"]
        self.xrefs = info["xrefs"]
        self.warnings.extend(info["warnings"])
        self.errors.extend(info["errors"])

        return info
342
+
343
+
344
if __name__ == "__main__":
    # Demonstration: parse a variety of resource notations and resolve them.
    # All annotations are constructed first (as in a literal list), then
    # each one is resolved and printed.
    examples = [
        # FIXME: support this
        (BQB.IS_VERSION_OF, "NCIT:C75913"),
        (BQB.IS_VERSION_OF, "taxonomy/562"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/taxonomy/9606"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/biomodels.sbo/SBO:0000247"),
        (BQB.IS_VERSION_OF, "urn:miriam:obo.go:GO%3A0005623"),
        (BQB.IS_VERSION_OF, "urn:miriam:chebi:CHEBI%3A33699"),
        (BQB.IS_VERSION_OF, "chebi/CHEBI:456215"),
        (BQB.IS, "https://en.wikipedia.org/wiki/Cytosol"),
        (BQB.IS_VERSION_OF, "urn:miriam:uniprot:P03023"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/go/GO:0005829"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/go/GO:0005829"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/GO:0005829"),
        (BQB.IS_VERSION_OF, "http://identifiers.org/GO:0005829"),
        (BQB.IS_VERSION_OF, "bto/BTO:0000089"),
        (BQB.IS_VERSION_OF, "BTO:0000089"),
        (BQB.IS_VERSION_OF, "chebi/CHEBI:000012"),
    ]
    annotations = [RDFAnnotation.from_tuple(item) for item in examples]
    for annotation in annotations:
        print("-" * 80)
        data = RDFAnnotationData(annotation)
        print(data)
        pprint(data.to_dict())
@@ -0,0 +1,46 @@
1
+ """Creator information."""
2
+
3
+ from typing import Optional
4
+
5
+
6
class Creator:
    """Creator in ModelHistory and other COMBINE formats."""

    def __init__(
        self,
        familyName: str,
        givenName: str,
        email: str,
        organization: str,
        site: Optional[str] = None,
        orcid: Optional[str] = None,
    ):
        """Initialize Creator."""
        self.familyName = familyName
        self.givenName = givenName
        self.email = email
        self.organization = organization
        self.site = site
        self.orcid = orcid

    def _fields(self) -> tuple:
        """Return all identifying attributes as a tuple (basis of equality)."""
        return (
            self.familyName,
            self.givenName,
            self.email,
            self.organization,
            self.site,
            self.orcid,
        )

    def __str__(self) -> str:
        """Get string representation."""
        family, given, *details = self._fields()
        detail_str = ", ".join(str(item) for item in details)
        return f"{family} {given} ({detail_str})"

    def __hash__(self) -> int:
        """Get hash, derived from the string representation."""
        return hash(str(self))

    def __eq__(self, other: object) -> bool:
        """Check for equality on all attributes."""
        if not isinstance(other, Creator):
            return NotImplemented
        return self._fields() == other._fields()
@@ -0,0 +1,12 @@
1
+ """Synonym information."""
2
+
3
+ from dataclasses import dataclass
4
+
5
+
6
@dataclass
class Synonym:
    """Synonym of a term.

    Lightweight record bundling a synonym with its type and source.
    """

    # synonym text
    name: str
    # kind of synonym — presumably e.g. exact/related; TODO confirm values
    type: str
    # provenance of the synonym — TODO confirm format
    source: str