biocypher 0.9.1__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of biocypher might be problematic. See the registry's advisory page for this release for more details.

biocypher/_get.py CHANGED
@@ -1,5 +1,6 @@
1
- """
2
- BioCypher get module. Used to download and cache data from external sources.
1
+ """BioCypher get module.
2
+
3
+ Used to download and cache data from external sources.
3
4
  """
4
5
 
5
6
  from __future__ import annotations
@@ -30,19 +31,22 @@ class Resource(ABC):
30
31
  url_s: str | list[str],
31
32
  lifetime: int = 0,
32
33
  ):
33
- """
34
+ """Initialize a Resource.
35
+
34
36
  A Resource is a file, a list of files, an API request, or a list of API
35
37
  requests, any of which can be downloaded from the given URL(s) and
36
38
  cached locally. This class implements checks of the minimum requirements
37
39
  for a resource, to be implemented by a biocypher adapter.
38
40
 
39
41
  Args:
42
+ ----
40
43
  name (str): The name of the resource.
41
44
 
42
45
  url_s (str | list[str]): The URL or URLs of the resource.
43
46
 
44
47
  lifetime (int): The lifetime of the resource in days. If 0, the
45
48
  resource is considered to be permanent.
49
+
46
50
  """
47
51
  self.name = name
48
52
  self.url_s = url_s
@@ -57,10 +61,12 @@ class FileDownload(Resource):
57
61
  lifetime: int = 0,
58
62
  is_dir: bool = False,
59
63
  ):
60
- """
64
+ """Initialize a FileDownload object.
65
+
61
66
  Represents basic information for a File Download.
62
67
 
63
68
  Args:
69
+ ----
64
70
  name(str): The name of the File Download.
65
71
 
66
72
  url_s(str|list[str]): The URL(s) of the File Download.
@@ -69,18 +75,20 @@ class FileDownload(Resource):
69
75
  File Download is cached indefinitely.
70
76
 
71
77
  is_dir (bool): Whether the URL points to a directory or not.
72
- """
73
78
 
79
+ """
74
80
  super().__init__(name, url_s, lifetime)
75
81
  self.is_dir = is_dir
76
82
 
77
83
 
78
84
  class APIRequest(Resource):
79
85
  def __init__(self, name: str, url_s: str | list[str], lifetime: int = 0):
80
- """
86
+ """Initialize an APIRequest object.
87
+
81
88
  Represents basic information for an API Request.
82
89
 
83
90
  Args:
91
+ ----
84
92
  name(str): The name of the API Request.
85
93
 
86
94
  url_s(str|list): The URL of the API endpoint.
@@ -94,29 +102,35 @@ class APIRequest(Resource):
94
102
 
95
103
  class Downloader:
96
104
  def __init__(self, cache_dir: Optional[str] = None) -> None:
97
- """
105
+ """Initialize the Downloader.
106
+
98
107
  The Downloader is a class that manages resources that can be downloaded
99
108
  and cached locally. It manages the lifetime of downloaded resources by
100
109
  keeping a JSON record of the download date of each resource.
101
110
 
102
111
  Args:
112
+ ----
103
113
  cache_dir (str): The directory where the resources are cached. If
104
114
  not given, a temporary directory is created.
115
+
105
116
  """
106
117
  self.cache_dir = cache_dir or TemporaryDirectory().name
107
118
  self.cache_file = os.path.join(self.cache_dir, "cache.json")
108
119
  self.cache_dict = self._load_cache_dict()
109
120
 
110
121
  def download(self, *resources: Resource):
111
- """
112
- Download one or multiple resources. Load from cache if the resource is
113
- already downloaded and the cache is not expired.
122
+ """Download one or multiple resources.
123
+
124
+ Load from cache if the resource is already downloaded and the cache is
125
+ not expired.
114
126
 
115
127
  Args:
128
+ ----
116
129
  resources (Resource): The resource(s) to download or load from
117
130
  cache.
118
131
 
119
132
  Returns:
133
+ -------
120
134
  list[str]: The path or paths to the resource(s) that were downloaded
121
135
  or loaded from cache.
122
136
 
@@ -132,12 +146,14 @@ class Downloader:
132
146
  return paths
133
147
 
134
148
  def _download_or_cache(self, resource: Resource, cache: bool = True):
135
- """
136
- Download a resource if it is not cached or exceeded its lifetime.
149
+ """Download a resource if it is not cached or exceeded its lifetime.
137
150
 
138
151
  Args:
152
+ ----
139
153
  resource (Resource): The resource to download.
154
+
140
155
  Returns:
156
+ -------
141
157
  list[str]: The path or paths to the downloaded resource(s).
142
158
 
143
159
  """
@@ -159,14 +175,16 @@ class Downloader:
159
175
  return paths
160
176
 
161
177
  def _is_cache_expired(self, resource: Resource) -> bool:
162
- """
163
- Check if resource or API request cache is expired.
178
+ """Check if resource or API request cache is expired.
164
179
 
165
180
  Args:
181
+ ----
166
182
  resource (Resource): The resource to download.
167
183
 
168
184
  Returns:
185
+ -------
169
186
  bool: cache is expired or not.
187
+
170
188
  """
171
189
  cache_record = self._get_cache_record(resource)
172
190
  if cache_record:
@@ -182,17 +200,21 @@ class Downloader:
182
200
  if os.path.exists(cache_resource_path) and os.path.isdir(cache_resource_path):
183
201
  shutil.rmtree(cache_resource_path)
184
202
 
185
- def _download_files(self, cache, file_download: FileDownload):
186
- """
187
- Download a resource given it is a file or a directory and return the
188
- path.
203
+ def _download_files(self, cache, file_download: FileDownload) -> list[str]:
204
+ """Download a resource given it is a file or a directory.
205
+
206
+ Upon downloading, return the path(s).
189
207
 
190
208
  Args:
209
+ ----
191
210
  cache (bool): Whether to cache the resource or not.
211
+
192
212
  file_download (FileDownload): The resource to download.
193
213
 
194
214
  Returns:
215
+ -------
195
216
  list[str]: The path or paths to the downloaded resource(s).
217
+
196
218
  """
197
219
  if file_download.is_dir:
198
220
  files = self._get_files(file_download)
@@ -202,7 +224,7 @@ class Downloader:
202
224
  elif isinstance(file_download.url_s, list):
203
225
  paths = []
204
226
  for url in file_download.url_s:
205
- fname = url[url.rfind("/") + 1 :].split("?")[0]
227
+ fname = self._trim_filename(url)
206
228
  path = self._retrieve(
207
229
  url=url,
208
230
  fname=fname,
@@ -211,7 +233,7 @@ class Downloader:
211
233
  paths.append(path)
212
234
  else:
213
235
  paths = []
214
- fname = file_download.url_s[file_download.url_s.rfind("/") + 1 :].split("?")[0]
236
+ fname = self._trim_filename(file_download.url_s)
215
237
  results = self._retrieve(
216
238
  url=file_download.url_s,
217
239
  fname=fname,
@@ -227,20 +249,23 @@ class Downloader:
227
249
  # adapter
228
250
  return paths
229
251
 
230
- def _download_api_request(self, api_request: APIRequest):
231
- """
232
- Download an API request and return the path.
252
+ def _download_api_request(self, api_request: APIRequest) -> list[str]:
253
+ """Download an API request and return the path.
233
254
 
234
255
  Args:
235
- api_request(APIRequest): The API request result that is being cached.
256
+ ----
257
+ api_request(APIRequest): The API request result that is being
258
+ cached.
259
+
236
260
  Returns:
261
+ -------
237
262
  list[str]: The path to the cached API request.
238
263
 
239
264
  """
240
265
  urls = api_request.url_s if isinstance(api_request.url_s, list) else [api_request.url_s]
241
266
  paths = []
242
267
  for url in urls:
243
- fname = url[url.rfind("/") + 1 :].rsplit(".", 1)[0]
268
+ fname = self._trim_filename(url)
244
269
  logger.info(f"Asking for caching API of {api_request.name} {fname}.")
245
270
  response = requests.get(url=url)
246
271
 
@@ -260,10 +285,13 @@ class Downloader:
260
285
  """Get the cached version of a resource.
261
286
 
262
287
  Args:
288
+ ----
263
289
  resource(Resource): The resource to get the cached version of.
264
290
 
265
291
  Returns:
292
+ -------
266
293
  list[str]: The paths to the cached resource(s).
294
+
267
295
  """
268
296
  cached_location = os.path.join(self.cache_dir, resource.name)
269
297
  logger.info(f"Use cached version from {cached_location}.")
@@ -278,17 +306,25 @@ class Downloader:
278
306
  fname: str,
279
307
  path: str,
280
308
  known_hash: str = None,
281
- ):
282
- """
283
- Retrieve a file from a URL using Pooch. Infer type of file from
284
- extension and use appropriate processor.
309
+ ) -> str:
310
+ """Retrieve a file from a URL using Pooch.
311
+
312
+ Infer type of file from extension and use appropriate processor.
285
313
 
286
314
  Args:
315
+ ----
287
316
  url (str): The URL to retrieve the file from.
288
317
 
289
318
  fname (str): The name of the file.
290
319
 
291
320
  path (str): The path to the file.
321
+
322
+ known_hash (str): The known hash of the file.
323
+
324
+ Returns:
325
+ -------
326
+ str: The path to the file.
327
+
292
328
  """
293
329
  if fname.endswith(".zip"):
294
330
  return pooch.retrieve(
@@ -329,15 +365,17 @@ class Downloader:
329
365
  progressbar=True,
330
366
  )
331
367
 
332
- def _get_files(self, file_download: FileDownload):
333
- """
334
- Get the files contained in a directory file.
368
+ def _get_files(self, file_download: FileDownload) -> list[str]:
369
+ """Get the files contained in a directory file.
335
370
 
336
371
  Args:
372
+ ----
337
373
  file_download (FileDownload): The directory file.
338
374
 
339
375
  Returns:
340
- list: The files contained in the directory.
376
+ -------
377
+ list[str]: The files contained in the directory.
378
+
341
379
  """
342
380
  if file_download.url_s.startswith("ftp://"):
343
381
  # remove protocol
@@ -353,14 +391,25 @@ class Downloader:
353
391
  files = ftp.nlst()
354
392
  ftp.quit()
355
393
  else:
356
- raise NotImplementedError("Only FTP directories are supported at the moment.")
394
+ msg = "Only FTP directories are supported at the moment."
395
+ logger.error(msg)
396
+ raise NotImplementedError(msg)
357
397
 
358
398
  return files
359
399
 
360
- def _load_cache_dict(self):
361
- """
362
- Load the cache dictionary from the cache file. Create an empty cache
363
- file if it does not exist.
400
+ def _load_cache_dict(self) -> dict:
401
+ """Load the cache dictionary from the cache file.
402
+
403
+ Create an empty cache file if it does not exist.
404
+
405
+ Args:
406
+ ----
407
+ None.
408
+
409
+ Returns:
410
+ -------
411
+ dict: The cache dictionary.
412
+
364
413
  """
365
414
  if not os.path.exists(self.cache_dir):
366
415
  logger.info(f"Creating cache directory {self.cache_dir}.")
@@ -375,24 +424,27 @@ class Downloader:
375
424
  logger.info(f"Loading cache file {self.cache_file}.")
376
425
  return json.load(f)
377
426
 
378
- def _get_cache_record(self, resource: Resource):
379
- """
380
- Get the cache record of a resource.
427
+ def _get_cache_record(self, resource: Resource) -> dict:
428
+ """Get the cache record of a resource.
381
429
 
382
430
  Args:
431
+ ----
383
432
  resource (Resource): The resource to get the cache record of.
384
433
 
385
434
  Returns:
386
- The cache record of the resource.
435
+ -------
436
+ dict: The cache record of the resource.
437
+
387
438
  """
388
439
  return self.cache_dict.get(resource.name, {})
389
440
 
390
- def _update_cache_record(self, resource: Resource):
391
- """
392
- Update the cache record of a resource.
441
+ def _update_cache_record(self, resource: Resource) -> None:
442
+ """Update the cache record of a resource.
393
443
 
394
444
  Args:
445
+ ----
395
446
  resource (Resource): The resource to update the cache record of.
447
+
396
448
  """
397
449
  cache_record = {}
398
450
  cache_record["url"] = to_list(resource.url_s)
@@ -401,3 +453,34 @@ class Downloader:
401
453
  self.cache_dict[resource.name] = cache_record
402
454
  with open(self.cache_file, "w") as f:
403
455
  json.dump(self.cache_dict, f, default=str)
456
+
457
+ def _trim_filename(self, url: str, max_length: int = 150) -> str:
458
+ """Create a trimmed filename from a URL.
459
+
460
+ If the URL exceeds max_length, create a hash of the filename.
461
+
462
+ Args:
463
+ ----
464
+ url (str): The URL to generate a filename from
465
+ max_length (int): Maximum filename length (default: 150)
466
+
467
+ Returns:
468
+ -------
469
+ str: A valid filename derived from the URL, trimmed if necessary
470
+
471
+ """
472
+ # Extract the filename from the URL
473
+ fname = url[url.rfind("/") + 1 :]
474
+
475
+ # Remove query parameters if present
476
+ if "?" in fname:
477
+ fname = fname.split("?")[0]
478
+
479
+ if len(fname) > max_length:
480
+ import hashlib
481
+
482
+ fname_trimmed = hashlib.md5(fname.encode()).hexdigest()
483
+ else:
484
+ fname_trimmed = fname
485
+
486
+ return fname_trimmed
biocypher/_metadata.py CHANGED
@@ -10,7 +10,7 @@ import pathlib
10
10
 
11
11
  import toml
12
12
 
13
- _VERSION = "0.9.1"
13
+ _VERSION = "0.9.3"
14
14
 
15
15
 
16
16
  def get_metadata():
biocypher/_misc.py CHANGED
@@ -1,22 +1,18 @@
1
- """
2
- Handy functions for use in various places.
3
- """
1
+ """Handy functions for use in various places."""
4
2
 
5
3
  import re
6
4
 
7
- from collections.abc import Iterable
8
- from typing import (
9
- Any,
5
+ from collections.abc import (
10
6
  Generator,
11
7
  ItemsView,
8
+ Iterable,
12
9
  KeysView,
13
10
  Mapping,
14
- Union,
15
11
  ValuesView,
16
12
  )
13
+ from typing import Any
17
14
 
18
15
  import networkx as nx
19
- import stringcase
20
16
 
21
17
  from treelib import Tree
22
18
 
@@ -48,10 +44,7 @@ LIST_LIKE = (
48
44
 
49
45
 
50
46
  def to_list(value: Any) -> list:
51
- """
52
- Ensures that ``value`` is a list.
53
- """
54
-
47
+ """Ensure that ``value`` is a list."""
55
48
  if isinstance(value, LIST_LIKE):
56
49
  value = list(value)
57
50
 
@@ -62,17 +55,12 @@ def to_list(value: Any) -> list:
62
55
 
63
56
 
64
57
  def ensure_iterable(value: Any) -> Iterable:
65
- """
66
- Returns iterables, except strings, wraps simple types into tuple.
67
- """
68
-
58
+ """Return iterables, except strings, wrap simple types into tuple."""
69
59
  return value if isinstance(value, LIST_LIKE) else (value,)
70
60
 
71
61
 
72
- def create_tree_visualisation(inheritance_graph: Union[dict, nx.Graph]) -> Tree:
73
- """
74
- Creates a visualisation of the inheritance tree using treelib.
75
- """
62
+ def create_tree_visualisation(inheritance_graph: dict | nx.Graph) -> Tree:
63
+ """Create a visualisation of the inheritance tree using treelib."""
76
64
  inheritance_tree = _get_inheritance_tree(inheritance_graph)
77
65
  classes, root = _find_root_node(inheritance_tree)
78
66
 
@@ -91,38 +79,42 @@ def create_tree_visualisation(inheritance_graph: Union[dict, nx.Graph]) -> Tree:
91
79
  return tree
92
80
 
93
81
 
94
- def _get_inheritance_tree(inheritance_graph: Union[dict, nx.Graph]) -> dict:
95
- """Transforms an inheritance_graph into an inheritance_tree.
82
+ def _get_inheritance_tree(inheritance_graph: dict | nx.Graph) -> dict | None:
83
+ """Transform an inheritance_graph into an inheritance_tree.
96
84
 
97
85
  Args:
86
+ ----
98
87
  inheritance_graph: A dict or nx.Graph representing the inheritance graph.
99
88
 
100
89
  Returns:
90
+ -------
101
91
  A dict representing the inheritance tree.
92
+
102
93
  """
103
94
  if isinstance(inheritance_graph, nx.Graph):
104
95
  inheritance_tree = nx.to_dict_of_lists(inheritance_graph)
105
96
 
106
97
  multiple_parents_present = _multiple_inheritance_present(inheritance_tree)
107
98
  if multiple_parents_present:
108
- logger.warning(
99
+ msg = (
109
100
  "The ontology contains multiple inheritance (one child node "
110
101
  "has multiple parent nodes). This is not visualized in the "
111
102
  "following hierarchy tree (the child node is only added once). "
112
103
  "If you wish to browse all relationships of the parsed "
113
104
  "ontologies, write a graphml file to disk using "
114
- "`to_disk = <directory>` and view this file."
105
+ "`to_disk = <directory>` and view this file.",
115
106
  )
116
-
107
+ logger.warning(msg)
117
108
  # unlist values
118
109
  inheritance_tree = {k: v[0] for k, v in inheritance_tree.items() if v}
119
110
  return inheritance_tree
120
111
  elif not _multiple_inheritance_present(inheritance_graph):
121
112
  return inheritance_graph
113
+ return None # Explicit return for the case when neither condition is met
122
114
 
123
115
 
124
116
  def _multiple_inheritance_present(inheritance_tree: dict) -> bool:
125
- """Checks if multiple inheritance is present in the inheritance_tree."""
117
+ """Check if multiple inheritance is present in the inheritance_tree."""
126
118
  return any(len(value) > 1 for value in inheritance_tree.values())
127
119
 
128
120
 
@@ -134,7 +126,9 @@ def _find_root_node(inheritance_tree: dict) -> tuple[set, str]:
134
126
  if "entity" in root:
135
127
  root = "entity" # TODO: default: good standard?
136
128
  else:
137
- raise ValueError("Inheritance tree cannot have more than one root node. " f"Found {len(root)}: {root}.")
129
+ msg = f"Inheritance tree cannot have more than one root node. Found {len(root)}: {root}."
130
+ logger.error(msg)
131
+ raise ValueError(msg)
138
132
  else:
139
133
  root = root[0]
140
134
  if not root:
@@ -158,53 +152,62 @@ def from_pascal(s: str, sep: str = " ") -> str:
158
152
 
159
153
 
160
154
  def pascalcase_to_sentencecase(s: str) -> str:
161
- """
162
- Convert PascalCase to sentence case.
155
+ """Convert PascalCase to sentence case.
163
156
 
164
157
  Args:
158
+ ----
165
159
  s: Input string in PascalCase
166
160
 
167
161
  Returns:
162
+ -------
168
163
  string in sentence case form
164
+
169
165
  """
170
166
  return from_pascal(s, sep=" ")
171
167
 
172
168
 
173
169
  def snakecase_to_sentencecase(s: str) -> str:
174
- """
175
- Convert snake_case to sentence case.
170
+ """Convert snake_case to sentence case.
176
171
 
177
172
  Args:
173
+ ----
178
174
  s: Input string in snake_case
179
175
 
180
176
  Returns:
177
+ -------
181
178
  string in sentence case form
179
+
182
180
  """
183
- return stringcase.sentencecase(s).lower()
181
+ return " ".join(word.lower() for word in s.split("_"))
184
182
 
185
183
 
186
184
  def sentencecase_to_snakecase(s: str) -> str:
187
- """
188
- Convert sentence case to snake_case.
185
+ """Convert sentence case to snake_case.
189
186
 
190
187
  Args:
188
+ ----
191
189
  s: Input string in sentence case
192
190
 
193
191
  Returns:
192
+ -------
194
193
  string in snake_case form
194
+
195
195
  """
196
- return stringcase.snakecase(s).lower()
196
+ return "_".join(s.lower().split())
197
197
 
198
198
 
199
199
  def sentencecase_to_pascalcase(s: str, sep: str = r"\s") -> str:
200
- """
201
- Convert sentence case to PascalCase.
200
+ """Convert sentence case to PascalCase.
202
201
 
203
202
  Args:
203
+ ----
204
204
  s: Input string in sentence case
205
+ sep: Separator for the words in the input string
205
206
 
206
207
  Returns:
208
+ -------
207
209
  string in PascalCase form
210
+
208
211
  """
209
212
  return re.sub(
210
213
  r"(?:^|[" + sep + "])([a-zA-Z])",
@@ -214,15 +217,18 @@ def sentencecase_to_pascalcase(s: str, sep: str = r"\s") -> str:
214
217
 
215
218
 
216
219
  def to_lower_sentence_case(s: str) -> str:
217
- """
218
- Convert any string to lower sentence case. Works with snake_case,
219
- PascalCase, and sentence case.
220
+ """Convert any string to lower sentence case.
221
+
222
+ Works with snake_case, PascalCase, and sentence case.
220
223
 
221
224
  Args:
225
+ ----
222
226
  s: Input string
223
227
 
224
228
  Returns:
229
+ -------
225
230
  string in lower sentence case form
231
+
226
232
  """
227
233
  if "_" in s:
228
234
  return snakecase_to_sentencecase(s)
@@ -234,15 +240,17 @@ def to_lower_sentence_case(s: str) -> str:
234
240
  return s
235
241
 
236
242
 
237
- def is_nested(lst) -> bool:
238
- """
239
- Check if a list is nested.
243
+ def is_nested(lst: list) -> bool:
244
+ """Check if a list is nested.
240
245
 
241
246
  Args:
247
+ ----
242
248
  lst (list): The list to check.
243
249
 
244
250
  Returns:
251
+ -------
245
252
  bool: True if the list is nested, False otherwise.
253
+
246
254
  """
247
255
  for item in lst:
248
256
  if isinstance(item, list):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: biocypher
3
- Version: 0.9.1
3
+ Version: 0.9.3
4
4
  Summary: A unifying framework for biomedical research knowledge graphs
5
5
  Home-page: https://github.com/biocypher/biocypher
6
6
  License: MIT
@@ -27,7 +27,6 @@ Requires-Dist: networkx (>=3.0,<4.0)
27
27
  Requires-Dist: pandas (>=2.0.1,<3.0.0)
28
28
  Requires-Dist: pooch (>=1.7.0,<2.0.0)
29
29
  Requires-Dist: rdflib (>=6.2.0,<7.0.0)
30
- Requires-Dist: stringcase (>=1.2.0,<2.0.0)
31
30
  Requires-Dist: tqdm (>=4.65.0,<5.0.0)
32
31
  Requires-Dist: treelib (==1.6.4)
33
32
  Project-URL: Bug Tracker, https://github.com/biocypher/biocypher/issues
@@ -8,11 +8,11 @@ biocypher/_config/test_schema_config_extended.yaml,sha256=wn3A76142hhjnImhMF6ROD
8
8
  biocypher/_core.py,sha256=S8frW62bc0V9M6gwU5l_D2ESa-7xwbOTffRuPqbIbT0,27382
9
9
  biocypher/_create.py,sha256=QsvXrwEQ8k0uNXvCG06UKejvw-QsJwzSaumrBjx9n1k,9884
10
10
  biocypher/_deduplicate.py,sha256=rtglcaLRaVzNjLtaPwTGP8VvCM4PHYQ5CZ-cm32CrKQ,4840
11
- biocypher/_get.py,sha256=2kjiBFu_onGUhgOpV7IMfGX67jyY1rpHg_GWncEUdrc,13278
11
+ biocypher/_get.py,sha256=BDNnvw6Xbz_Lq0ekU1Y60J4DI1zQKiZugJV6zGnWPsI,14873
12
12
  biocypher/_logger.py,sha256=y9dh3SPJOCWXnkFSYSK7aj_-pB7zlAkNCf43Dp1lt74,2941
13
13
  biocypher/_mapping.py,sha256=ntspG2C_NaQODhWTBFk0CDvolkOCjtqlQ9E-NkJAuTg,9030
14
- biocypher/_metadata.py,sha256=Eop3cijNQBsHWeOO8zbBmN_2ICJutZziDPvWppV474M,1415
15
- biocypher/_misc.py,sha256=N8aDg8j3EEcKf9ZRqzSNruUEUK4RixCy1vQ1V4maGxk,6079
14
+ biocypher/_metadata.py,sha256=Z8vZYEjGXUNfcRhLpWBawExiESXuhhhIH7pxsbZYejU,1415
15
+ biocypher/_misc.py,sha256=YzlY7zwa0mim9QFg9HwXErkJFIH3cvLrbgjF8tKOIT8,6353
16
16
  biocypher/_ontology.py,sha256=lipZxU3aj6zrTbBrJZmCW6IRCuz-KQG3AfbYCVq6aFE,33133
17
17
  biocypher/_translate.py,sha256=9E19eLRL0VnxxDuiNhZ5vu54XyKXnfLuBhCgNcL9yAE,17000
18
18
  biocypher/output/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -38,7 +38,7 @@ biocypher/output/write/relational/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
38
38
  biocypher/output/write/relational/_csv.py,sha256=m0BSQXts88Qu5AEvoIgnwRz54ia38g4VN3PaA3LCYM8,2807
39
39
  biocypher/output/write/relational/_postgresql.py,sha256=RckQJBiuwvDmHAyXxS8zCavYqDecHHWW_piofurokfQ,11965
40
40
  biocypher/output/write/relational/_sqlite.py,sha256=BuGWOeeNA83lbUvjpkzqcR9_baWLsbfmLXBKe4O1EPE,2105
41
- biocypher-0.9.1.dist-info/LICENSE,sha256=oejgxuxyjSnyPw3YPloz6-dCBB_nYizJ4jDQnr-xZUU,1082
42
- biocypher-0.9.1.dist-info/METADATA,sha256=Qjy-T-iNXAn0wye7-NbLivn7ZPz80ZrOu8utum7_4vk,10643
43
- biocypher-0.9.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
44
- biocypher-0.9.1.dist-info/RECORD,,
41
+ biocypher-0.9.3.dist-info/LICENSE,sha256=oejgxuxyjSnyPw3YPloz6-dCBB_nYizJ4jDQnr-xZUU,1082
42
+ biocypher-0.9.3.dist-info/METADATA,sha256=5V08qJIN2n5vsedgGbk-ChO0OjrNxR5hJRvM4YEPSfo,10600
43
+ biocypher-0.9.3.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
44
+ biocypher-0.9.3.dist-info/RECORD,,