siibra-1.0.0a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (84)
  1. siibra/VERSION +1 -0
  2. siibra/__init__.py +164 -0
  3. siibra/commons.py +823 -0
  4. siibra/configuration/__init__.py +17 -0
  5. siibra/configuration/configuration.py +189 -0
  6. siibra/configuration/factory.py +589 -0
  7. siibra/core/__init__.py +16 -0
  8. siibra/core/assignment.py +110 -0
  9. siibra/core/atlas.py +239 -0
  10. siibra/core/concept.py +308 -0
  11. siibra/core/parcellation.py +387 -0
  12. siibra/core/region.py +1223 -0
  13. siibra/core/space.py +131 -0
  14. siibra/core/structure.py +111 -0
  15. siibra/exceptions.py +63 -0
  16. siibra/experimental/__init__.py +19 -0
  17. siibra/experimental/contour.py +61 -0
  18. siibra/experimental/cortical_profile_sampler.py +57 -0
  19. siibra/experimental/patch.py +98 -0
  20. siibra/experimental/plane3d.py +256 -0
  21. siibra/explorer/__init__.py +17 -0
  22. siibra/explorer/url.py +222 -0
  23. siibra/explorer/util.py +87 -0
  24. siibra/features/__init__.py +117 -0
  25. siibra/features/anchor.py +224 -0
  26. siibra/features/connectivity/__init__.py +33 -0
  27. siibra/features/connectivity/functional_connectivity.py +57 -0
  28. siibra/features/connectivity/regional_connectivity.py +494 -0
  29. siibra/features/connectivity/streamline_counts.py +27 -0
  30. siibra/features/connectivity/streamline_lengths.py +27 -0
  31. siibra/features/connectivity/tracing_connectivity.py +30 -0
  32. siibra/features/dataset/__init__.py +17 -0
  33. siibra/features/dataset/ebrains.py +90 -0
  34. siibra/features/feature.py +970 -0
  35. siibra/features/image/__init__.py +27 -0
  36. siibra/features/image/image.py +115 -0
  37. siibra/features/image/sections.py +26 -0
  38. siibra/features/image/volume_of_interest.py +88 -0
  39. siibra/features/tabular/__init__.py +24 -0
  40. siibra/features/tabular/bigbrain_intensity_profile.py +77 -0
  41. siibra/features/tabular/cell_density_profile.py +298 -0
  42. siibra/features/tabular/cortical_profile.py +322 -0
  43. siibra/features/tabular/gene_expression.py +257 -0
  44. siibra/features/tabular/layerwise_bigbrain_intensities.py +62 -0
  45. siibra/features/tabular/layerwise_cell_density.py +95 -0
  46. siibra/features/tabular/receptor_density_fingerprint.py +192 -0
  47. siibra/features/tabular/receptor_density_profile.py +110 -0
  48. siibra/features/tabular/regional_timeseries_activity.py +294 -0
  49. siibra/features/tabular/tabular.py +139 -0
  50. siibra/livequeries/__init__.py +19 -0
  51. siibra/livequeries/allen.py +352 -0
  52. siibra/livequeries/bigbrain.py +197 -0
  53. siibra/livequeries/ebrains.py +145 -0
  54. siibra/livequeries/query.py +49 -0
  55. siibra/locations/__init__.py +91 -0
  56. siibra/locations/boundingbox.py +454 -0
  57. siibra/locations/location.py +115 -0
  58. siibra/locations/point.py +344 -0
  59. siibra/locations/pointcloud.py +349 -0
  60. siibra/retrieval/__init__.py +27 -0
  61. siibra/retrieval/cache.py +233 -0
  62. siibra/retrieval/datasets.py +389 -0
  63. siibra/retrieval/exceptions/__init__.py +27 -0
  64. siibra/retrieval/repositories.py +769 -0
  65. siibra/retrieval/requests.py +659 -0
  66. siibra/vocabularies/__init__.py +45 -0
  67. siibra/vocabularies/gene_names.json +29176 -0
  68. siibra/vocabularies/receptor_symbols.json +210 -0
  69. siibra/vocabularies/region_aliases.json +460 -0
  70. siibra/volumes/__init__.py +23 -0
  71. siibra/volumes/parcellationmap.py +1279 -0
  72. siibra/volumes/providers/__init__.py +20 -0
  73. siibra/volumes/providers/freesurfer.py +113 -0
  74. siibra/volumes/providers/gifti.py +165 -0
  75. siibra/volumes/providers/neuroglancer.py +736 -0
  76. siibra/volumes/providers/nifti.py +266 -0
  77. siibra/volumes/providers/provider.py +107 -0
  78. siibra/volumes/sparsemap.py +468 -0
  79. siibra/volumes/volume.py +892 -0
  80. siibra-1.0.0a1.dist-info/LICENSE +201 -0
  81. siibra-1.0.0a1.dist-info/METADATA +160 -0
  82. siibra-1.0.0a1.dist-info/RECORD +84 -0
  83. siibra-1.0.0a1.dist-info/WHEEL +5 -0
  84. siibra-1.0.0a1.dist-info/top_level.txt +1 -0
siibra/commons.py ADDED
@@ -0,0 +1,823 @@
# Copyright 2018-2024
# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants, functions, and classes used commonly across siibra."""

import os
import re
from enum import Enum
from nibabel import Nifti1Image
from nilearn.image import resample_to_img
import logging
from tqdm import tqdm
import numpy as np
import pandas as pd
from typing import Generic, Iterable, Iterator, List, TypeVar, Union, Dict, Generator, Tuple
from skimage.filters import gaussian
from dataclasses import dataclass
from hashlib import md5
from uuid import UUID
import math
try:
    from typing import TypedDict
except ImportError:
    # support python 3.7
    from typing_extensions import TypedDict

logging.addLevelName(21, "INFO_WO_PROGRESS_BARS")
logger = logging.getLogger(__name__.split(os.path.extsep)[0])
ch = logging.StreamHandler()
formatter = logging.Formatter("[{name}:{levelname}] {message}", style="{")
ch.setFormatter(formatter)
logger.addHandler(ch)


ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
HBP_AUTH_TOKEN = os.getenv("HBP_AUTH_TOKEN")
KEYCLOAK_CLIENT_ID = os.getenv("KEYCLOAK_CLIENT_ID")
KEYCLOAK_CLIENT_SECRET = os.getenv("KEYCLOAK_CLIENT_SECRET")
SIIBRA_CACHEDIR = os.getenv("SIIBRA_CACHEDIR")
SIIBRA_LOG_LEVEL = os.getenv("SIIBRA_LOG_LEVEL", "INFO")
SIIBRA_USE_CONFIGURATION = os.getenv("SIIBRA_USE_CONFIGURATION")
SIIBRA_USE_LOCAL_SNAPSPOT = os.getenv("SIIBRA_USE_LOCAL_SNAPSPOT")
SKIP_CACHEINIT_MAINTENANCE = os.getenv("SKIP_CACHEINIT_MAINTENANCE")
SIIBRA_MAX_FETCH_SIZE_GIB = float(os.getenv("SIIBRA_MAX_FETCH_SIZE_GIB", 0.2))

with open(os.path.join(ROOT_DIR, "VERSION"), "r") as fp:
    __version__ = fp.read().strip()


@dataclass
class CompareMapsResult:
    intersection_over_union: float
    intersection_over_first: float
    intersection_over_second: float
    correlation: float
    weighted_mean_of_first: float
    weighted_mean_of_second: float


class TypePublication(TypedDict):
    citation: str
    url: str


T = TypeVar("T")


class InstanceTable(Generic[T], Iterable):
    """
    Lookup table for instances of a given class by name/id.
    Provides attribute access and iteration over a set of named elements,
    given by a dictionary with keys of 'str' type.
    """

    def __init__(self, matchfunc=lambda a, b: a == b, elements=None):
        """
        Build an object lookup table from a dictionary with string keys, for easy
        attribute-like access, name autocompletion, and iteration.
        A matchfunc can be provided to enable inexact matching inside the index
        operator. It is a binary function, taking as first argument a value of the
        dictionary (i.e. an object that you put into this glossary) and as second
        argument the index/specification that should match one of the objects,
        and returning a boolean.
        """

        assert hasattr(matchfunc, "__call__")
        if elements is None:
            self._elements: Dict[str, T] = {}
        else:
            assert isinstance(elements, dict)
            assert all(isinstance(k, str) for k in elements.keys())
            self._elements: Dict[str, T] = elements
        self._matchfunc = matchfunc
        self._dataframe_cached = None

    def add(self, key: str, value: T) -> None:
        """
        Add a key/value pair to the registry.

        Parameters
        ----------
        key (string): Unique name or key of the object
        value (object): The registered object
        """
        if key in self._elements:
            logger.error(
                f"Key {key} already in {__class__.__name__}, existing value will be replaced."
            )
        self._elements[key] = value

    def __dir__(self) -> Iterable[str]:
        """List of all object keys in the registry"""
        if isinstance(self[0], type):
            return list(self._elements.keys())
        else:
            return ["dataframe"] + list(self._elements.keys())

    def __str__(self) -> str:
        if len(self) > 0:
            return f"{self.__class__.__name__}:\n - " + "\n - ".join(self._elements.keys())
        else:
            return f"Empty {self.__class__.__name__}"

    def __repr__(self):
        return f"<{self.__class__.__name__} of {self[0].__class__}>"

    def __iter__(self) -> Iterator[T]:
        """Iterate over all objects in the registry"""
        return (w for w in self._elements.values())

    def __contains__(self, key: Union[str, T]) -> bool:
        """Test whether the given key or element is defined by the registry."""
        if isinstance(key, str):
            return key in self._elements
        return key in self._elements.values()

    def __len__(self) -> int:
        """Return the number of elements in the registry"""
        return len(self._elements)

    def __getitem__(self, spec) -> T:
        return self.get(spec)

    def get(self, spec) -> T:
        """Give access to objects in the registry by sequential index,
        exact key, or keyword matching. If the keywords match multiple objects,
        the first in sorted order is returned. If the specification does not match,
        an IndexError is raised.

        Parameters
        ----------
        spec: int, str
            Index or string specification of an object

        Returns
        -------
        Matched object
        """
        if spec is None:
            return None
        elif spec == "":
            raise IndexError(f"{__class__.__name__} indexed with empty string")
        matches = self.find(spec)
        if len(matches) == 0:
            raise IndexError(
                f"{__class__.__name__} has no entry matching the specification '{spec}'. "
                "Possible values are:\n" + str(self)
            )
        elif len(matches) == 1:
            return matches[0]
        else:
            try:
                S = sorted(matches, reverse=True)
            except TypeError:
                # not all object types support sorting, accept this
                S = matches
            largest = S[0]
            logger.info(
                f"Multiple elements matched the specification '{spec}' - the first in order was chosen: {largest}"
            )
            return largest

    def __sub__(self, obj) -> "InstanceTable[T]":
        """
        Remove an object from the registry.
        """
        if obj in self._elements.values():
            return InstanceTable[T](
                self._matchfunc, {k: v for k, v in self._elements.items() if v != obj}
            )
        else:
            return self

    def provides(self, spec) -> bool:
        """
        Returns True if an element matching the given specification can be found
        (using find(), thus going beyond the name-only matching of __contains__).
        """
        matches = self.find(spec)
        return len(matches) > 0

    def find(self, spec) -> List[T]:
        """
        Return a list of items matching the given specification,
        which could be either the name or a specification that
        works with the matchfunc of this lookup table.
        """
        if isinstance(spec, str) and (spec in self._elements):
            return [self._elements[spec]]
        elif isinstance(spec, int) and (spec < len(self._elements)):
            return [list(self._elements.values())[spec]]
        else:
            # string matching on values
            matches = [v for v in self._elements.values() if self._matchfunc(v, spec)]
            if len(matches) == 0 and isinstance(spec, str):
                # string matching on keys
                matches = [
                    self._elements[k]
                    for k in self._elements.keys()
                    if all(w.lower() in k.lower() for w in spec.split())
                ]
            return matches

    def values(self):
        return self._elements.values()

    def __getattr__(self, index) -> T:
        """Access elements by using their keys as attributes.
        Keys are auto-generated from the provided names to be uppercase,
        with words delimited using underscores.
        """
        if index in ["keys", "names"]:
            return list(self._elements.keys())
        elif index in self._elements:
            return self._elements[index]
        else:
            hint = ""
            if isinstance(index, str):
                import difflib

                closest = difflib.get_close_matches(
                    index, list(self._elements.keys()), n=3
                )
                if len(closest) > 0:
                    hint = f"Did you mean {' or '.join(closest)}?"
            raise AttributeError(f"Term '{index}' not in {__class__.__name__}. " + hint)

    @property
    def dataframe(self):
        if self._dataframe_cached is None:
            values = self._elements.values()
            attrs = []
            for i, val in enumerate(values):
                attrs.append({'name': val.name, 'species': str(val.species)})
                if hasattr(val, 'maptype'):
                    attrs[i].update(
                        {
                            attribute: val.__getattribute__(attribute).name
                            for attribute in ['parcellation', 'space', 'maptype']
                        }
                    )
            self._dataframe_cached = pd.DataFrame(index=list(self._elements.keys()), data=attrs)
        return self._dataframe_cached
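
# Usage sketch (toy string values for illustration; siibra stores objects such
# as atlases, parcellations, or spaces in these tables):
# >>> table = InstanceTable(elements={"AREA_HOC1": "v1 object", "AREA_HOC2": "v2 object"})
# >>> table.AREA_HOC1           # attribute-style access by key
# 'v1 object'
# >>> table.get("hoc2")         # keyword matching on keys via find()
# 'v2 object'
# >>> "AREA_HOC1" in table
# True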


class LoggingContext:
    def __init__(self, level):
        self.level = level

    def __enter__(self):
        self.old_level = logger.level
        logger.setLevel(self.level)

    def __exit__(self, et, ev, tb):
        logger.setLevel(self.old_level)


def set_log_level(level):
    logger.setLevel(level)


set_log_level(SIIBRA_LOG_LEVEL)
QUIET = LoggingContext("ERROR")
VERBOSE = LoggingContext("DEBUG")


def siibra_tqdm(iterable: Iterable[T] = None, *args, **kwargs):
    return tqdm(
        iterable,
        *args,
        disable=kwargs.pop("disable", False) or (logger.level > logging.INFO),
        **kwargs
    )


def create_key(name: str):
    """
    Creates an uppercase identifier string that includes only alphanumeric
    characters and underscore from a natural language name.
    """
    return re.sub(
        r" +",
        "_",
        "".join([e if e.isalnum() else " " for e in name]).upper().strip(),
    )
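
# For example:
# >>> create_key("area hOc1 (V1, 17)")
# 'AREA_HOC1_V1_17'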


class MapIndex:
    """
    Identifies a unique region in a ParcellationMap, combining its label index
    (the "color") with the volume index (the number of the 3D map, in case
    multiple volumes are provided) and, optionally, a fragment name.
    """

    def __init__(self, volume: int = None, label: int = None, fragment: str = None):
        if volume is None and label is None:
            raise ValueError(
                "At least volume or label need to be specified to build a valid map index."
            )
        if volume is not None:
            assert isinstance(volume, int)
        if label is not None:
            assert isinstance(label, int)
        self.volume = volume
        self.label = label
        self.fragment = fragment

    @classmethod
    def from_dict(cls, spec: dict):
        assert all(k in spec for k in ['volume', 'label'])
        return cls(
            volume=spec['volume'],
            label=spec['label'],
            fragment=spec.get('fragment')
        )

    def __str__(self):
        return f"(volume:{self.volume}, label:{self.label}, fragment:{self.fragment})"

    def __repr__(self):
        frag = f"'{self.fragment}'" if self.fragment else self.fragment
        return f"<{self.__class__.__name__}(volume={self.volume}, label={self.label}, fragment={frag})>"

    def __eq__(self, other):
        assert isinstance(other, self.__class__), f'Cannot compare {self.__class__} and {other.__class__}'
        return all([
            self.volume == other.volume,
            self.label == other.label,
            self.fragment == other.fragment
        ])

    def __hash__(self):
        return hash((self.volume, self.label, self.fragment))
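
# MapIndex usage sketch (illustrative values):
# >>> MapIndex(volume=0, label=12)
# <MapIndex(volume=0, label=12, fragment=None)>
# >>> MapIndex.from_dict({"volume": 0, "label": 12, "fragment": "left hemisphere"})
# <MapIndex(volume=0, label=12, fragment='left hemisphere')>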


class MapType(Enum):
    LABELLED = 1
    STATISTICAL = 2


REMOVE_FROM_NAME = [
    "hemisphere",
    " -",
    "-brain",
    "both",
    "Both",
]

REPLACE_IN_NAME = {
    "ctx-lh-": "left ",
    "ctx-rh-": "right ",
}


def clear_name(name):
    """Clean up a region name to prepare it for matching."""
    result = name
    for word in REMOVE_FROM_NAME:
        result = result.replace(word, "")
    for search, repl in REPLACE_IN_NAME.items():
        result = result.replace(search, repl)
    return " ".join(w for w in result.split(" ") if len(w))


def snake2camel(s: str):
    """Converts a string in snake_case into CamelCase.
    For example: JULICH_BRAIN -> JulichBrain"""
    return "".join([w[0].upper() + w[1:].lower() for w in s.split("_")])


# Getting nonzero pixels of pmaps is one of the most time consuming tasks when computing metrics,
# so the nonzero coordinates of array objects are meant to be cached at runtime
# (caching is currently disabled, see the TODO in nonzero_coordinates).
NZCACHE = {}


def nonzero_coordinates(arr):
    # TODO: fix caching
    return np.c_[np.nonzero(arr > 0)]


def affine_scaling(affine):
    """Estimate the approximate isotropic scaling factor of an affine matrix."""
    orig = np.dot(affine, [0, 0, 0, 1])
    unit_lengths = []
    for vec in np.identity(3):
        vec_phys = np.dot(affine, np.r_[vec, 1])
        unit_lengths.append(np.linalg.norm(orig - vec_phys))
    return np.prod(unit_lengths)
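
# For example, a 2 mm isotropic affine scales voxel volume by a factor of 8:
# >>> float(affine_scaling(np.diag([2., 2., 2., 1.])))
# 8.0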


def compare_arrays(arr1: np.ndarray, affine1: np.ndarray, arr2: np.ndarray, affine2: np.ndarray):
    """
    Compare two arrays in physical space as defined by the given affine matrices.
    The matrices map voxel coordinates to physical coordinates.
    Nonzero coordinates are extracted via nonzero_coordinates(), which is meant
    to cache them per array object at runtime so that repeated calls involving
    the same map only need to access the image arrays when overlapping voxels
    are detected.

    It is recommended to install the indexed-gzip package,
    which will further speed this up.
    """
    a1, a2 = arr1.squeeze(), arr2.squeeze()

    def homog(XYZ):
        return np.c_[XYZ, np.ones(XYZ.shape[0])]

    def colsplit(XYZ):
        return np.split(XYZ, 3, axis=1)

    # Compute the nonzero voxels in map2 and their correspondences in map1
    XYZnz2 = nonzero_coordinates(a2)
    N2 = XYZnz2.shape[0]
    warp2on1 = np.dot(np.linalg.inv(affine1), affine2)
    XYZnz2on1 = (np.dot(warp2on1, homog(XYZnz2).T).T[:, :3] + 0.5).astype("int")

    # valid voxel pairs
    valid = np.all(
        np.logical_and.reduce(
            [
                XYZnz2on1 >= 0,
                XYZnz2on1 < arr1.shape[:3],
                XYZnz2 >= 0,
                XYZnz2 < arr2.shape[:3],
            ]
        ),
        1,
    )
    X1, Y1, Z1 = colsplit(XYZnz2on1[valid, :])
    X2, Y2, Z2 = colsplit(XYZnz2[valid, :])

    # intersection
    v1, v2 = a1[X1, Y1, Z1].squeeze(), a2[X2, Y2, Z2].squeeze()
    m1, m2 = ((_ > 0).astype("uint8") for _ in [v1, v2])
    intersection = np.minimum(m1, m2).sum()
    if intersection == 0:
        return CompareMapsResult(
            intersection_over_union=0,
            intersection_over_first=0,
            intersection_over_second=0,
            correlation=0,
            weighted_mean_of_first=0,
            weighted_mean_of_second=0,
        )

    # Compute the nonzero voxels in map1 with their correspondences in map2
    XYZnz1 = nonzero_coordinates(a1)
    N1 = XYZnz1.shape[0]
    warp1on2 = np.dot(np.linalg.inv(affine2), affine1)

    # Voxels referring to the union of the nonzero pixels in both maps
    XYZa1 = np.unique(np.concatenate((XYZnz1, XYZnz2on1)), axis=0)
    XYZa2 = (np.dot(warp1on2, homog(XYZa1).T).T[:, :3] + 0.5).astype("int")
    valid = np.all(
        np.logical_and.reduce(
            [XYZa1 >= 0, XYZa1 < arr1.shape[:3], XYZa2 >= 0, XYZa2 < arr2.shape[:3]]
        ),
        1,
    )
    Xa1, Ya1, Za1 = colsplit(XYZa1[valid, :])
    Xa2, Ya2, Za2 = colsplit(XYZa2[valid, :])

    # Pearson's r with respect to the full-size image
    x = a1[Xa1, Ya1, Za1].squeeze()
    y = a2[Xa2, Ya2, Za2].squeeze()
    mu_x = x.sum() / a1.size
    mu_y = y.sum() / a2.size
    x0 = x - mu_x
    y0 = y - mu_y
    dem = np.sqrt(np.sum(x0 ** 2) * np.sum(y0 ** 2))
    if dem == 0:
        r = 0
    else:
        r = np.sum(np.multiply(x0, y0)) / dem

    bx = (x > 0).astype("uint8")
    by = (y > 0).astype("uint8")
    return CompareMapsResult(
        intersection_over_union=intersection / np.maximum(bx, by).sum(),
        intersection_over_first=intersection / N1,
        intersection_over_second=intersection / N2,
        correlation=r,
        weighted_mean_of_first=np.sum(x * y) / np.sum(y),
        weighted_mean_of_second=np.sum(x * y) / np.sum(x),
    )
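
# Sanity-check sketch (synthetic data): comparing a map with itself in the
# same physical space yields full overlap.
# >>> arr = np.zeros((10, 10, 10)); arr[3:6, 3:6, 3:6] = 1
# >>> float(compare_arrays(arr, np.eye(4), arr, np.eye(4)).intersection_over_union)
# 1.0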


def resample_img_to_img(
    source_img: Nifti1Image,
    target_img: Nifti1Image,
    interpolation: str = ""
) -> Nifti1Image:
    """
    Resamples the source image to match the target image according to the
    target's affine. (A wrapper of `nilearn.image.resample_to_img`.)

    Parameters
    ----------
    source_img : Nifti1Image
    target_img : Nifti1Image
    interpolation : str
        Can be 'continuous', 'linear', or 'nearest'. Indicates the resampling
        method. Default: "nearest" if the source image is a binary mask,
        otherwise "linear".

    Returns
    -------
    Nifti1Image
    """
    if not interpolation:
        interpolation = "nearest" if np.array_equal(np.unique(source_img.dataobj), [0, 1]) else "linear"
    resampled_img = resample_to_img(
        source_img=source_img,
        target_img=target_img,
        interpolation=interpolation
    )
    return resampled_img
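
# Usage sketch (synthetic images): a non-binary source is resampled with
# linear interpolation onto the target's finer 0.5 mm grid.
# >>> src = Nifti1Image(np.random.rand(4, 4, 4), np.eye(4))
# >>> tgt = Nifti1Image(np.zeros((8, 8, 8)), np.diag([0.5, 0.5, 0.5, 1.]))
# >>> resample_img_to_img(src, tgt).shape
# (8, 8, 8)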


def connected_components(
    imgdata: np.ndarray,
    background: int = 0,
    connectivity: int = 2,
    threshold: float = 0.0,
) -> Generator[Tuple[int, np.ndarray], None, None]:
    """
    Provide an iterator over connected components in the array. If the image
    data is float (such as probability maps), it will be converted to a mask
    before the connected components are determined.

    Note
    ----
    Uses `skimage.measure.label()` to determine foreground components.

    Parameters
    ----------
    imgdata : np.ndarray
    background : int, Default: 0
    connectivity : int, Default: 2
    threshold : float, Default: 0.0
        The threshold used to create a mask from probability maps, i.e.,
        anything at or below it is set to 0 and the rest to 1.

    Yields
    ------
    Generator[Tuple[int, np.ndarray], None, None]
        Tuple of the integer label of the component and the component as an
        np.ndarray in the shape of the original image.
    """
    from skimage import measure

    mask = (imgdata > threshold).astype('uint8')
    components = measure.label(mask, connectivity=connectivity, background=background)
    component_labels = np.unique(components)
    return (
        (label, (components == label).astype('uint8'))
        for label in component_labels
        if label > 0
    )
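
# Usage sketch: a toy volume with two isolated foreground voxels yields two
# connected components.
# >>> arr = np.zeros((5, 5, 5)); arr[0, 0, 0] = 1; arr[4, 4, 4] = 1
# >>> len(list(connected_components(arr)))
# 2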


def unify_stringlist(L: list):
    """Adds asterisks to strings that appear multiple times, so the resulting
    list has only unique strings but still the same length, order, and meaning.
    For example:
        unify_stringlist(['a','a','b','a','c']) -> ['a','a*','b','a**','c']
    """
    assert all([isinstance(_, str) for _ in L])
    return [L[i] + "*" * L[:i].count(L[i]) for i in range(len(L))]


def create_gaussian_kernel(sigma=1, sigma_point=3):
    """
    Compute a 3D Gaussian kernel of the given bandwidth.
    """
    r = int(sigma_point * sigma)
    k_size = 2 * r + 1
    impulse = np.zeros((k_size, k_size, k_size))
    impulse[r, r, r] = 1
    kernel = gaussian(impulse, sigma)
    kernel /= kernel.sum()
    return kernel


def argmax_dim4(img, dim=-1):
    """
    Given a nifti image object with four dimensions, returns a modified object
    with 3 dimensions that is obtained by taking the argmax along one of the
    four dimensions (default: the last one). To distinguish the pure background
    voxels from the foreground voxels of channel 0, the argmax indices are
    incremented by 1 and label index 0 is kept to represent the background.
    """
    assert len(img.shape) == 4
    assert dim >= -1 and dim < 4
    newarr = np.asarray(img.dataobj).argmax(dim) + 1
    # reset the true background voxels to zero
    newarr[np.asarray(img.dataobj).max(dim) == 0] = 0
    return Nifti1Image(dataobj=newarr, header=img.header, affine=img.affine)


def MI(arr1, arr2, nbins=100, normalized=True):
    """
    Compute the mutual information between two 3D arrays, which need to have the same shape.

    Parameters
    ----------
    arr1: np.ndarray
        First 3D array
    arr2: np.ndarray
        Second 3D array
    nbins: int
        number of bins to use for computing the joint histogram (applies to intensity range)
    normalized: bool, Default: True
        if True, the normalized MI of arrays X and Y will be returned,
        leading to a range of values between 0 and 1. Normalization is
        achieved by NMI = 2*MI(X,Y) / (H(X) + H(Y)), where H(X) is the entropy of X.
    """

    assert all(len(arr.shape) == 3 for arr in [arr1, arr2])
    assert all(arr.size > 0 for arr in [arr1, arr2])

    # compute the normalized joint 2D histogram as an
    # empirical measure of the joint probability of arr1 and arr2
    pxy, _, _ = np.histogram2d(arr1.ravel(), arr2.ravel(), bins=nbins)
    pxy /= pxy.sum()

    # extract the empirical probabilities of intensities
    # from the joint histogram
    px = np.sum(pxy, axis=1)  # marginal for x over y
    py = np.sum(pxy, axis=0)  # marginal for y over x

    # compute the mutual information
    px_py = px[:, None] * py[None, :]
    nzs = pxy > 0  # nonzero value indices
    MI = np.sum(pxy[nzs] * np.log(pxy[nzs] / px_py[nzs]))
    if not normalized:
        return MI

    # normalize, using the sum of their individual entropies H
    def entropy(p):
        nz = p > 0
        assert np.count_nonzero(nz) > 0
        return -np.sum(p[nz] * np.log(p[nz]))

    Hx, Hy = [entropy(p) for p in [px, py]]
    assert (Hx + Hy) > 0
    NMI = 2 * MI / (Hx + Hy)
    return NMI
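
# Sanity-check sketch: an array shares maximal information with itself, so the
# normalized mutual information approaches 1.
# >>> a = np.random.rand(10, 10, 10)
# >>> round(float(MI(a, a)), 6)
# 1.0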


def is_mesh(structure: Union[list, dict]):
    if isinstance(structure, dict):
        return all(k in structure for k in ["verts", "faces"])
    elif isinstance(structure, list):
        return all(map(is_mesh, structure))
    else:
        return False


def merge_meshes(meshes: list, labels: list = None):
    # Merge a list of meshes into one.
    # If the meshes have no labels, a list of labels with the same length
    # as the number of meshes can be supplied to add a labeling per submesh.

    assert len(meshes) > 0
    if len(meshes) == 1:
        return meshes[0]

    assert all('verts' in m for m in meshes)
    assert all('faces' in m for m in meshes)
    has_labels = all('labels' in m for m in meshes)
    if has_labels:
        assert labels is None

    nverts = [0] + [m['verts'].shape[0] for m in meshes[:-1]]
    verts = np.concatenate([m['verts'] for m in meshes])
    # offset each mesh's face indices by the cumulative number of preceding vertices
    faces = np.concatenate([m['faces'] + N for m, N in zip(meshes, np.cumsum(nverts))])
    if has_labels:
        labels = np.array([lbl for m in meshes for lbl in m['labels']])
        return {'verts': verts, 'faces': faces, 'labels': labels}
    elif labels is not None:
        assert len(labels) == len(meshes)
        labels = np.array(
            [labels[i] for i, m in enumerate(meshes) for _ in m['verts']]
        )
        return {'verts': verts, 'faces': faces, 'labels': labels}
    else:
        return {'verts': verts, 'faces': faces}
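
# For example, merging two single-triangle meshes with per-mesh labels
# (toy coordinates): the second mesh's face indices are shifted by its
# vertex offset of 3.
# >>> m1 = {'verts': np.zeros((3, 3)), 'faces': np.array([[0, 1, 2]])}
# >>> m2 = {'verts': np.ones((3, 3)), 'faces': np.array([[0, 1, 2]])}
# >>> merged = merge_meshes([m1, m2], labels=[0, 1])
# >>> merged['faces'][1]
# array([3, 4, 5])
# >>> merged['labels']
# array([0, 0, 0, 1, 1, 1])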


class Species(Enum):

    HOMO_SAPIENS = 1
    RATTUS_NORVEGICUS = 2
    MUS_MUSCULUS = 3
    MACACA_FASCICULARIS = 4
    MACACA_MULATTA = 5
    MACACA_FUSCATA = 6
    CHLOROCEBUS_AETHIOPS_SABAEUS = 7

    UNSPECIFIED_SPECIES = 999

    @classmethod
    def decode(cls, spec: Union[str, 'Species', dict], fail_if_not_successful=True):

        MINDS_IDS = {
            "0ea4e6ba-2681-4f7d-9fa9-49b915caaac9": 1,
            "f3490d7f-8f7f-4b40-b238-963dcac84412": 2,
            "cfc1656c-67d1-4d2c-a17e-efd7ce0df88c": 3,
            "c541401b-69f4-4809-b6eb-82594fc90551": 4,
            "745712aa-fad1-47c4-8ab6-088063f78f64": 5,
            "ed8254b1-519c-4356-b1c9-7ead5aa1e3e1": 6,
            "e578d886-c55d-4174-976b-3cf43b142203": 7
        }

        OPENMINDS_IDS = {
            "97c070c6-8e1f-4ee8-9d28-18c7945921dd": 1,
            "ab532423-1fd7-4255-8c6f-f99dc6df814f": 2,
            "d9875ebd-260e-4337-a637-b62fed4aa91d": 3,
            "0b6df2b3-5297-40cf-adde-9443d3d8214a": 4,
            "3ad33ec1-5152-497d-9352-1cf4497e0edd": 5,
            "2ab3ecf5-76cc-46fa-98ab-309e3fd50f57": 6,
            "b8bf99e7-0914-4b65-a386-d785249725f1": 7
        }

        if isinstance(spec, Species):
            return spec
        elif isinstance(spec, str):
            # split it in case it is an actual uuid from the KG
            if spec.split('/')[-1] in MINDS_IDS:
                return cls(MINDS_IDS[spec.split('/')[-1]])
            if spec.split('/')[-1] in OPENMINDS_IDS:
                return cls(OPENMINDS_IDS[spec.split('/')[-1]])
            key = cls.name_to_key(spec)
            if key in cls.__members__.keys():
                return getattr(cls, key)
        else:
            if isinstance(spec, (list, set)):
                next_specs = spec
            elif isinstance(spec, dict):
                next_specs = spec.values()
            else:
                raise ValueError(f"Species specification cannot be decoded: {spec}")
            for s in next_specs:
                result = cls.decode(s, fail_if_not_successful=False)
                if result is not None:
                    return result

        # if we get here, spec was not decoded into a species
        if fail_if_not_successful:
            raise ValueError(f"Species specification cannot be decoded: {spec}")
        else:
            return None

    @staticmethod
    def name_to_key(name: str):
        return re.sub(r'\s+', '_', name.strip()).upper()

    @staticmethod
    def key_to_name(key: str):
        return re.sub(r'_', ' ', key.strip()).lower()

    def __str__(self):
        return f"{self.name.lower().replace('_', ' ')}".capitalize()

    def __repr__(self):
        return f"{self.__class__.__name__}: {str(self)}"
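
# Species.decode usage sketch (illustrative):
# >>> Species.decode("Homo sapiens")
# Species: Homo sapiens
# >>> Species.decode({"name": "Mus musculus"})  # nested specifications are searched recursively
# Species: Mus musculus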


def generate_uuid(string: Union[str, Nifti1Image]):
    """Build a deterministic UUID from the MD5 digest of a string or Nifti1Image."""
    if isinstance(string, str):
        b = string.encode("UTF-8")
    elif isinstance(string, Nifti1Image):
        b = string.to_bytes()
    else:
        raise ValueError(f"Cannot build uuid for parameter type {type(string)}")
    hex_string = md5(b).hexdigest()
    return str(UUID(hex=hex_string))
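
# The mapping is deterministic, so equal inputs always yield equal UUIDs:
# >>> generate_uuid("foo") == generate_uuid("foo")
# True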


def translation_matrix(tx: float, ty: float, tz: float):
    """Construct a 3D homogeneous translation matrix."""
    return np.array([
        [1, 0, 0, tx],
        [0, 1, 0, ty],
        [0, 0, 1, tz],
        [0, 0, 0, 1]
    ])


def y_rotation_matrix(alpha: float):
    """Construct a 3D y-axis rotation matrix."""
    return np.array([
        [math.cos(alpha), 0, math.sin(alpha), 0],
        [0, 1, 0, 0],
        [-math.sin(alpha), 0, math.cos(alpha), 0],
        [0, 0, 0, 1]
    ])
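
# These helpers compose like ordinary homogeneous transforms. Usage sketch
# (illustrative): rotate the unit z vector by 90 degrees about y, then
# translate by 10 along x.
# >>> A = np.dot(translation_matrix(10, 0, 0), y_rotation_matrix(math.pi / 2))
# >>> np.allclose(np.dot(A, [0, 0, 1, 1]), [11, 0, 0, 1])
# True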