siibra 0.4a77__py3-none-any.whl → 0.5a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of siibra might be problematic.

@@ -14,26 +14,26 @@
  # limitations under the License.

  from . import tabular
+ from ..feature import Compoundable

  from .. import anchor as _anchor
-
- from ...commons import logger, QUIET, siibra_tqdm
+ from ...commons import QUIET
  from ...locations import pointset
  from ...retrieval.repositories import RepositoryConnector
+ from ...retrieval.requests import HttpRequest

- from typing import Callable, Dict, List
+ from typing import Callable, List
  import pandas as pd
  import numpy as np


- class RegionalTimeseriesActivity(tabular.Tabular):
+ class RegionalTimeseriesActivity(tabular.Tabular, Compoundable):
  """
  Datasets that provide regional activity over time.
  """

- DESCRIPTION = (
- ""
- )
+ _filter_attrs = ["modality", "cohort", "subject"]
+ _compound_attrs = ["modality", "cohort"]

  def __init__(
  self,
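The class now mixes in Compoundable and declares _filter_attrs / _compound_attrs, which suggests that individual elements are distinguished by subject and grouped (compounded) by modality and cohort. A self-contained sketch of that grouping idea; the Feature dataclass and the cohort values below are illustrative stand-ins, not siibra's actual Compoundable API:

    # Illustration only: grouping feature-like objects by the values named in a
    # class-level _compound_attrs list. Not siibra code.
    from collections import defaultdict
    from dataclasses import dataclass
    from typing import Dict, List, Tuple


    @dataclass
    class Feature:
        modality: str
        cohort: str
        subject: str

        _compound_attrs = ["modality", "cohort"]


    def group_by_compound_key(features: List[Feature]) -> Dict[Tuple, List[Feature]]:
        """Group features sharing the same values for their compound attributes."""
        groups = defaultdict(list)
        for f in features:
            key = tuple(getattr(f, attr) for attr in f._compound_attrs)
            groups[key].append(f)
        return dict(groups)


    features = [
        Feature("BOLD", "HCP", "000"),
        Feature("BOLD", "HCP", "001"),
        Feature("BOLD", "1000BRAINS", "0001_1"),
    ]
    print(group_by_compound_key(features).keys())
    # dict_keys([('BOLD', 'HCP'), ('BOLD', '1000BRAINS')])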
@@ -42,103 +42,88 @@ class RegionalTimeseriesActivity(tabular.Tabular):
  regions: list,
  connector: RepositoryConnector,
  decode_func: Callable,
- files: Dict[str, str],
+ filename: str,
  anchor: _anchor.AnatomicalAnchor,
  timestep: str,
  description: str = "",
  datasets: list = [],
- paradigm: str = ""
+ subject: str = "average"
  ):
  """
  """
  tabular.Tabular.__init__(
  self,
  modality=modality,
- description=description or '\n'.join({ds.description for ds in datasets}),
+ description=description,
  anchor=anchor,
  datasets=datasets,
  data=None # lazy loading below
  )
  self.cohort = cohort.upper()
- self._connector = connector
- self._files = files
+ if isinstance(connector, str) and connector:
+ self._connector = HttpRequest(connector, decode_func)
+ else:
+ self._connector = connector
+ self._filename = filename
  self._decode_func = decode_func
  self.regions = regions
- self._tables = {}
- self.paradigm = paradigm
- self.timestep = timestep
+ self._table = None
+ self._subject = subject
+ val, unit = timestep.split(" ")
+ self.timestep = (float(val), unit)

  @property
- def subjects(self):
- """
- Returns the subject identifiers for which signal tables are available.
- """
- return list(self._files.keys())
+ def subject(self):
+ """Returns the subject identifiers for which the matrix represents."""
+ return self._subject

  @property
  def name(self):
- supername = super().name
- return f"{supername} with paradigm {self.paradigm}"
+ return f"{super().name} with cohort {self.cohort} - subject {self.subject}"

- def get_table(self, subject: str = None):
+ @property
+ def data(self) -> pd.DataFrame:
  """
- Returns a pandas dataframe where the column headers are regions and the
- indcies indicate disctrete timesteps.
-
- Parameters
- ----------
- subject: str, default: None
- Name of the subject (see RegionalTimeseriesActivity.subjects for available names).
- If None, the mean is taken in case of multiple available data tables.
- Returns
- -------
- pd.DataFrame
- A table with region names as the column and timesteps as indices.
+ Returns a table as a pandas dataframe where the index is a timeseries.
  """
- assert len(self) > 0
- if (subject is None) and (len(self) > 1):
- # multiple signal tables available, but no subject given - return mean table
- logger.info(
- f"No subject name supplied, returning mean signal table across {len(self)} subjects. "
- "You might alternatively specify an individual subject."
- )
- if "mean" not in self._tables:
- all_arrays = [
- self._connector.get(fname, decode_func=self._decode_func)
- for fname in siibra_tqdm(
- self._files.values(),
- total=len(self),
- desc=f"Averaging {len(self)} signal tables"
- )
- ]
- self._tables['mean'] = self._array_to_dataframe(np.stack(all_arrays).mean(0))
- return self._tables['mean'].copy()
- if subject is None:
- subject = next(iter(self._files.keys()))
- if subject not in self._files:
- raise ValueError(f"Subject name '{subject}' not known, use one of: {', '.join(self._files)}")
- if subject not in self._tables:
- self._tables[subject] = self._load_table(subject)
- return self._tables[subject].copy()
+ if self._table is None:
+ self._load_table()
+ return self._table.copy()

- def _load_table(self, subject: str):
+ def _load_table(self):
  """
  Extract the timeseries table.
  """
- assert subject in self.subjects
- array = self._connector.get(self._files[subject], decode_func=self._decode_func)
- return self._array_to_dataframe(array)
-
- def __len__(self):
- return len(self._files)
+ array = self._connector.get(self._filename, decode_func=self._decode_func)
+ if not isinstance(array, np.ndarray):
+ assert isinstance(array, pd.DataFrame)
+ array = array.to_numpy()
+ ncols = array.shape[1]
+ self._table = pd.DataFrame(
+ array,
+ index=pd.TimedeltaIndex(
+ np.arange(0, array.shape[0]) * self.timestep[0],
+ unit=self.timestep[1],
+ name="time"
+ )
+ )
+ parcellations = self.anchor.represented_parcellations()
+ assert len(parcellations) == 1
+ parc = next(iter(parcellations))
+ with QUIET:
+ columnmap = {
+ i: parc.get_region(regionname, allow_tuple=True)
+ for i, regionname in enumerate(self.regions)
+ }
+ if len(columnmap) == ncols:
+ remapper = {
+ label - min(columnmap.keys()): region
+ for label, region in columnmap.items()
+ }
+ self._table = self._table.rename(columns=remapper)

  def __str__(self):
- return "{} with paradigm {} for {} from {} cohort ({} signal tables)".format(
- self.modality, self.paradigm,
- "_".join(p.name for p in self.anchor.parcellations),
- self.cohort,
- len(self._files),
- )
+ return self.name
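The reworked constructor above parses timestep strings such as "0.72 s" into a (value, unit) pair, and _load_table now indexes the signal table with a pandas TimedeltaIndex. A minimal, runnable sketch of that conversion; the timestep string and the random array below are made up for illustration:

    # Standalone sketch of the new timestep handling: "<value> <unit>" is split
    # and used to build a TimedeltaIndex for the regional signal table.
    import numpy as np
    import pandas as pd

    timestep = "0.72 s"              # assumed format: "<value> <unit>"
    val, unit = timestep.split(" ")
    val = float(val)

    signals = np.random.rand(5, 3)   # 5 timepoints x 3 regions (random demo data)
    table = pd.DataFrame(
        signals,
        index=pd.TimedeltaIndex(np.arange(signals.shape[0]) * val, unit=unit, name="time"),
    )
    print(table.index[:2])
    # TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:00.720000'], ..., name='time')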

  def compute_centroids(self, space):
  """
@@ -172,31 +157,8 @@ class RegionalTimeseriesActivity(tabular.Tabular):
  )
  return result

- def _array_to_dataframe(self, array: np.ndarray) -> pd.DataFrame:
- """
- Convert a numpy array with the regional activity data to
- a DataFrame with regions as column headers and timesteps as indices.
- """
- df = pd.DataFrame(array)
- parcellations = self.anchor.represented_parcellations()
- assert len(parcellations) == 1
- parc = next(iter(parcellations))
- with QUIET:
- indexmap = {
- i: parc.get_region(regionname, allow_tuple=True)
- for i, regionname in enumerate(self.regions)
- }
- ncols = array.shape[1]
- if len(indexmap) == ncols:
- remapper = {
- label - min(indexmap.keys()): region
- for label, region in indexmap.items()
- }
- df = df.rename(columns=remapper)
- return df
-
  def plot(
- self, subject: str = None, regions: List[str] = None, *args,
+ self, regions: List[str] = None, *args,
  backend="matplotlib", **kwargs
  ):
  """
@@ -214,12 +176,12 @@ class RegionalTimeseriesActivity(tabular.Tabular):
  if regions is None:
  regions = self.regions
  indices = [self.regions.index(r) for r in regions]
- table = self.get_table(subject).iloc[:, indices]
+ table = self.data.iloc[:, indices]
  table.columns = [str(r) for r in table.columns]
  return table.mean().plot(kind="bar", *args, backend=backend, **kwargs)

  def plot_carpet(
- self, subject: str = None, regions: List[str] = None, *args,
+ self, regions: List[str] = None, *args,
  backend="plotly", **kwargs
  ):
  """
@@ -238,12 +200,17 @@ class RegionalTimeseriesActivity(tabular.Tabular):
  if regions is None:
  regions = self.regions
  indices = [self.regions.index(r) for r in regions]
- table = self.get_table(subject).iloc[:, indices]
+ table = self.data.iloc[:, indices].reset_index(drop=True)
  table.columns = [str(r) for r in table.columns]
+ kwargs["title"] = kwargs.get("title", f"{self.modality}" + f" for subject={self.subject}")
+ kwargs["labels"] = kwargs.get("labels", {
+ "xlabel": self.data.index.to_numpy(dtype='timedelta64[ms]')}
+ )
  from plotly.express import imshow
  return imshow(
+ *args,
  table.T,
- title=f"{self.modality}" + f" for subject={subject}" if subject else ""
+ **kwargs
  )
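plot_carpet above now fills in a default title and labels entry only when the caller has not supplied one, and forwards everything to plotly.express.imshow on the transposed table (one row per region). A rough standalone sketch of that call path, using random data and an illustrative title; requires plotly:

    # Sketch of the carpet-plot call path: caller kwargs take precedence over the
    # injected defaults, and the table is passed transposed (regions x timepoints).
    import numpy as np
    import pandas as pd
    from plotly.express import imshow

    table = pd.DataFrame(np.random.rand(20, 4), columns=[f"region {i}" for i in range(4)])

    kwargs = {}  # as supplied by the caller
    kwargs["title"] = kwargs.get("title", "Regional BOLD for subject=average")
    fig = imshow(table.T, **kwargs)
    fig.show()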
248
@@ -256,4 +223,16 @@ class RegionalBOLD(
  Blood-oxygen-level-dependent (BOLD) signals per region.
  """

- pass
+ _filter_attrs = RegionalTimeseriesActivity._filter_attrs + ["paradigm"]
+ _compound_attrs = RegionalTimeseriesActivity._compound_attrs + ["paradigm"]
+
+ def __init__(self, paradigm: str, **kwargs):
+ RegionalTimeseriesActivity.__init__(self, **kwargs)
+ self.paradigm = paradigm
+
+ # paradign is used to distinguish functional connectivity features from each other.
+ assert self.paradigm, "RegionalBOLD must have paradigm defined!"
+
+ @property
+ def name(self):
+ return f"{super().name}, paradigm {self.paradigm}"
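RegionalBOLD now extends the parent's class-level attribute lists and requires a non-empty paradigm. A generic illustration of this subclassing pattern; the Activity/BOLD classes below are stand-ins, not siibra code:

    # Generic pattern: the child extends the parent's class-level attribute list
    # and enforces an extra required field in __init__.
    from typing import List


    class Activity:
        _filter_attrs: List[str] = ["modality", "cohort", "subject"]

        def __init__(self, modality: str, cohort: str, subject: str = "average"):
            self.modality, self.cohort, self.subject = modality, cohort, subject


    class BOLD(Activity):
        _filter_attrs = Activity._filter_attrs + ["paradigm"]

        def __init__(self, paradigm: str, **kwargs):
            super().__init__(**kwargs)
            assert paradigm, "BOLD must have a paradigm defined!"
            self.paradigm = paradigm


    b = BOLD(paradigm="rest", modality="BOLD", cohort="HCP", subject="000")
    print(BOLD._filter_attrs)  # ['modality', 'cohort', 'subject', 'paradigm']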
@@ -29,15 +29,10 @@ from os import path

  class WagstylProfileLoader:

- REPO = "https://github.com/kwagstyl/cortical_layers_tutorial"
- BRANCH = "main"
- PROFILES_FILE_LEFT = "https://data-proxy.ebrains.eu/api/v1/public/buckets/d-26d25994-634c-40af-b88f-2a36e8e1d508/profiles/profiles_left.txt"
- PROFILES_FILE_RIGHT = "https://data-proxy.ebrains.eu/api/v1/public/buckets/d-26d25994-634c-40af-b88f-2a36e8e1d508/profiles/profiles_right.txt"
+ REPO = "https://github.com/kwagstyl/cortical_layers_tutorial/raw/main"
+ PROFILES_FILE_LEFT = "data/profiles_left.npy"
  THICKNESSES_FILE_LEFT = "data/thicknesses_left.npy"
- THICKNESSES_FILE_RIGHT = ""
- MESH_FILE_LEFT = "gray_left_327680.surf.gii"
- MESH_FILE_RIGHT = "gray_right_327680.surf.gii"
- BASEURL = "https://ftp.bigbrainproject.org/bigbrain-ftp/BigBrainRelease.2015/3D_Surfaces/Apr7_2016/gii/"
+ MESH_FILE_LEFT = "data/gray_left_327680.surf.gii"
  _profiles = None
  _vertices = None
  _boundary_depths = None
@@ -53,31 +48,25 @@ class WagstylProfileLoader:
  @classmethod
  def _load(cls):
  # read thicknesses, in mm, and normalize by their last column which is the total thickness
- thickness_left = requests.HttpRequest(f"{cls.REPO}/raw/{cls.BRANCH}/{cls.THICKNESSES_FILE_LEFT}").data.T
- thickness_right = np.zeros(shape=thickness_left.shape) # TODO: replace with thickness data for te right hemisphere
- thickness = np.concatenate((thickness_left[:, :-1], thickness_right[:, :-1])) # last column is the computed total thickness
- total_thickness = thickness.sum(1)
+ thickness = requests.HttpRequest(f"{cls.REPO}/{cls.THICKNESSES_FILE_LEFT}").data.T
+ total_thickness = thickness[:, :-1].sum(1) # last column is the computed total thickness
  valid = np.where(total_thickness > 0)[0]
  cls._boundary_depths = np.c_[np.zeros_like(valid), (thickness[valid, :] / total_thickness[valid, None]).cumsum(1)]
  cls._boundary_depths[:, -1] = 1 # account for float calculation errors

  # find profiles with valid thickness
- if not all(
- path.exists(cache.CACHE.build_filename(url))
- for url in [cls.PROFILES_FILE_LEFT, cls.PROFILES_FILE_RIGHT]
- ):
+ profile_l_url = f"{cls.REPO}/{cls.PROFILES_FILE_LEFT}"
+ if not path.exists(cache.CACHE.build_filename(profile_l_url)):
  logger.info(
  "First request to BigBrain profiles. Preprocessing the data "
  "now. This may take a little."
  )
- profiles_l = requests.HttpRequest(cls.PROFILES_FILE_LEFT).data.to_numpy()
- profiles_r = requests.HttpRequest(cls.PROFILES_FILE_RIGHT).data.to_numpy()
- cls._profiles = np.concatenate((profiles_l, profiles_r))[valid, :]
+ profiles_l_all = requests.HttpRequest(profile_l_url).data
+ cls._profiles = profiles_l_all[valid, :]

  # read mesh vertices
- mesh_left = requests.HttpRequest(f"{cls.BASEURL}/{cls.MESH_FILE_LEFT}").data
- mesh_right = requests.HttpRequest(f"{cls.BASEURL}/{cls.MESH_FILE_RIGHT}").data
- mesh_vertices = np.concatenate((mesh_left.darrays[0].data, mesh_right.darrays[0].data))
+ mesh_left = requests.HttpRequest(f"{cls.REPO}/{cls.MESH_FILE_LEFT}").data
+ mesh_vertices = mesh_left.darrays[0].data
  cls._vertices = mesh_vertices[valid, :]

  logger.debug(f"{cls._profiles.shape[0]} BigBrain intensity profiles.")
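The thickness handling above normalizes per-layer thicknesses by the total thickness and cumulatively sums them to obtain relative boundary depths in [0, 1]. A small numpy sketch of that computation with made-up data for three vertices (one of which has zero total thickness and is discarded):

    import numpy as np

    # rows = vertices, first columns = layer thicknesses (mm), last column = total
    thickness = np.array([
        [0.5, 1.0, 0.5, 2.0],
        [0.2, 0.4, 0.4, 1.0],
        [0.0, 0.0, 0.0, 0.0],   # invalid vertex: zero total thickness
    ])
    total_thickness = thickness[:, :-1].sum(1)
    valid = np.where(total_thickness > 0)[0]

    boundary_depths = np.c_[
        np.zeros_like(valid, dtype=float),
        (thickness[valid, :] / total_thickness[valid, None]).cumsum(1),
    ]
    boundary_depths[:, -1] = 1  # guard against floating point error in the last boundary
    print(boundary_depths)
    # [[0.   0.25 0.75 1.   1.  ]
    #  [0.   0.2  0.6  1.   1.  ]]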
@@ -14,7 +14,71 @@
  # limitations under the License.
  """Handles spatial concepts and spatial operation like warping between spaces."""

- from .location import WholeBrain
+ from .location import WholeBrain, Location
  from .point import Point
  from .pointset import PointSet
  from .boundingbox import BoundingBox
+
+
+ def reassign_union(loc0: 'Location', loc1: 'Location') -> 'Location':
+ """
+ Add two locations of same or diffrent type to find their union as a
+ Location object.
+ Note
+ ----
+ `loc1` will be warped to `loc0` they are not in the same space.
+ Parameters
+ ----------
+ loc0 : Location
+ _description_
+ loc1 : Location
+ _description_
+ Returns
+ -------
+ Location
+ - Point U Point = PointSet
+ - Point U PointSet = PointSet
+ - PointSet U PointSet = PointSet
+ - BoundingBox U BoundingBox = BoundingBox
+ - BoundingBox U PointSet = BoundingBox
+ - BoundingBox U Point = BoundingBox
+ - WholeBrain U Location = NotImplementedError
+ (all operations are commutative)
+ """
+ if loc0 is None or loc1 is None:
+ return loc0 or loc1
+
+ if isinstance(loc0, WholeBrain) or isinstance(loc1, WholeBrain):
+ raise NotImplementedError("Union of WholeBrains is not yet implemented.")
+
+ loc1_w = loc1.warp(loc0.space) # adopt the space of the first location
+
+ if isinstance(loc0, Point): # turn Points to PointSets
+ return reassign_union(
+ PointSet([loc0], space=loc0.space, sigma_mm=loc0.sigma), loc1_w
+ )
+
+ if isinstance(loc0, PointSet):
+ if isinstance(loc1_w, PointSet):
+ points = set(loc0.points + loc1_w.points)
+ return PointSet(
+ points,
+ space=loc0.space,
+ sigma_mm=[p.sigma for p in points],
+ )
+ if isinstance(loc1_w, BoundingBox):
+ return reassign_union(loc0.boundingbox, loc1_w)
+
+ if isinstance(loc0, BoundingBox) and isinstance(loc1_w, BoundingBox):
+ points = [loc0.minpoint, loc0.maxpoint, loc1_w.minpoint, loc1_w.maxpoint]
+ return BoundingBox(
+ point1=[min(p[i] for p in points) for i in range(3)],
+ point2=[max(p[i] for p in points) for i in range(3)],
+ space=loc0.space,
+ sigma_mm=[loc0.minpoint.sigma, loc0.maxpoint.sigma]
+ )
+
+ return reassign_union(loc1_w, loc0)
+
+
+ Location.union = reassign_union
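With the new module-level reassign_union bound to Location.union, unions can be computed across location types. A rough usage sketch, assuming the 0.5a1 wheel is installed and network access is available to fetch the siibra configuration; the space key and coordinates are illustrative only:

    # Rough usage sketch (illustrative coordinates and space key).
    from siibra.locations import Location, Point

    p1 = Point((27.75, -32.0, 63.725), space="mni152")
    p2 = Point((25.0, -30.5, 60.0), space="mni152")

    combined = Location.union(p1, p2)   # Point U Point -> PointSet
    print(type(combined).__name__, len(combined))

Because the function is attached to the Location class at import time, calling it as a method on an instance (p1.union(p2)) should behave the same way.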
@@ -273,22 +273,6 @@ class BoundingBox(location.Location):
  .boundingbox
  )

- def union(self, other):
- """
- Computes the union of this boudning box with another one.
-
- Args:
- other (BoundingBox): Another bounding box
- """
- warped = other.warp(self.space)
- points = [self.minpoint, self.maxpoint, warped.minpoint, warped.maxpoint]
- return BoundingBox(
- point1=[min(p[i] for p in points) for i in range(3)],
- point2=[max(p[i] for p in points) for i in range(3)],
- space=self.space,
- sigma_mm=[self.minpoint.sigma, self.maxpoint.sigma]
- )
-
  def clip(self, xyzmax, xyzmin=(0, 0, 0)):
  """
  Returns a new bounding box obtained by clipping at the given maximum coordinate.
@@ -98,6 +98,19 @@ class Location(ABC):
  f"[{','.join(str(l) for l in iter(self))}]"
  )

+ def __repr__(self):
+ return f"{self.__class__.__name__}: {self}"
+
+ @staticmethod
+ def union(loc0: 'Location', loc1: 'Location') -> 'Location':
+ """
+ Reassigned at the locations module level for static typing and to avoid
+ circular imports. See siibra.locations.__init__.reassign_union()
+ """
+ raise NotImplementedError(
+ "This method is designed to be reassigned at the module level"
+ )
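Location gains a placeholder union staticmethod that the package __init__ later replaces with reassign_union, sidestepping a circular import between the base class and its subclasses. A generic, non-siibra illustration of that pattern:

    # Generic illustration: a base class declares a placeholder, and the package
    # __init__ swaps in the real function once all subclasses are importable.
    class Shape:
        @staticmethod
        def union(a: "Shape", b: "Shape") -> "Shape":
            raise NotImplementedError("reassigned at module level")


    class Box(Shape):
        def __init__(self, lo, hi):
            self.lo, self.hi = lo, hi


    def _union(a: "Shape", b: "Shape") -> "Shape":
        # the real implementation may refer to every subclass without import cycles
        if isinstance(a, Box) and isinstance(b, Box):
            return Box(min(a.lo, b.lo), max(a.hi, b.hi))
        raise NotImplementedError


    Shape.union = _union   # analogous to Location.union = reassign_union

    merged = Shape.union(Box(0, 2), Box(1, 5))
    print(merged.lo, merged.hi)   # 0 5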
+

  class WholeBrain(Location):
  """
@@ -159,9 +159,7 @@ class PointSet(location.Location):
  return len(self.points)

  def __str__(self):
- return f"Set of points {self.space.name}: " + ", ".join(
- f"({','.join(str(v) for v in p)})" for p in self
- )
+ return f"Set of {len(self)} points in the {self.boundingbox}"

  @property
  def boundingbox(self):
siibra/retrieval/cache.py CHANGED
@@ -19,7 +19,7 @@ import os
  from appdirs import user_cache_dir
  import tempfile

- from ..commons import logger, SIIBRA_CACHEDIR, SKIP_CACHE_MAINTENANCE
+ from ..commons import logger, SIIBRA_CACHEDIR, SKIP_CACHEINIT_MAINTENANCE


  def assert_folder(folder):
@@ -64,7 +64,10 @@ class Cache:
  cls.folder = SIIBRA_CACHEDIR
  cls.folder = assert_folder(cls.folder)
  cls._instance = cls.__new__(cls)
- cls._instance.run_maintenance()
+ if SKIP_CACHEINIT_MAINTENANCE:
+ logger.debug("Will not run maintenance on cache as SKIP_CACHE_MAINTENANCE is set to True.")
+ else:
+ cls._instance.run_maintenance()
  return cls._instance

  def clear(self):
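Cache maintenance is now decided when the singleton instance is first created, via the SKIP_CACHEINIT_MAINTENANCE flag imported from commons. A generic sketch of that pattern, assuming the flag ultimately comes from an environment variable; the variable name MYAPP_SKIP_CACHE_MAINT below is hypothetical and not siibra's actual setting:

    import os

    # Hypothetical flag: read once at import time from an environment variable.
    SKIP_CACHEINIT_MAINTENANCE = os.getenv("MYAPP_SKIP_CACHE_MAINT", "").lower() in ("1", "true", "yes")


    class Cache:
        _instance = None

        @classmethod
        def instance(cls) -> "Cache":
            if cls._instance is None:
                cls._instance = cls.__new__(cls)
                if SKIP_CACHEINIT_MAINTENANCE:
                    print("skipping cache maintenance at init")
                else:
                    cls._instance.run_maintenance()
            return cls._instance

        def run_maintenance(self):
            print("running cache maintenance")


    Cache.instance()   # maintenance runs (or is skipped) only on first access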
@@ -77,10 +80,6 @@ class Cache:
  def run_maintenance(self):
  """ Shrinks the cache by deleting oldest files first until the total size
  is below cache size (Cache.SIZE) given in GiB."""
- if SKIP_CACHE_MAINTENANCE:
- logger.debug("Will not run maintenance on cache as SKIP_CACHE_MAINTENANCE is set to True.")
- return
-
  # build sorted list of cache files and their os attributes
  files = [os.path.join(self.folder, fname) for fname in os.listdir(self.folder)]
  sfiles = sorted([(fn, os.stat(fn)) for fn in files], key=lambda t: t[1].st_atime)
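run_maintenance orders cache files by access time so the least recently used can be removed first until the folder is below Cache.SIZE GiB. The rest of the loop is not shown in this hunk; a standalone sketch of such a shrink loop, with a placeholder folder path and size limit:

    import os

    SIZE_GIB = 2.0
    folder = "/tmp/mycache"   # placeholder cache folder, assumed to exist

    files = [os.path.join(folder, fname) for fname in os.listdir(folder)]
    sfiles = sorted([(fn, os.stat(fn)) for fn in files], key=lambda t: t[1].st_atime)

    total_bytes = sum(st.st_size for _, st in sfiles)
    limit = SIZE_GIB * 1024 ** 3
    for fn, st in sfiles:          # oldest access time first
        if total_bytes <= limit:
            break
        os.remove(fn)
        total_bytes -= st.st_size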
@@ -128,7 +128,7 @@ class EbrainsDataset(EbrainsBaseDataset):
  return self._id

  @property
- def detail(self):
+ def _detail(self):
  if not self._cached_data:
  match = re.search(r"([a-f0-9-]+)$", self.id)
  instance_id = match.group(1)
@@ -145,7 +145,7 @@ class EbrainsDataset(EbrainsBaseDataset):
  @property
  def name(self) -> str:
  if self._name is None:
- self._name = self.detail.get("name")
+ self._name = self._detail.get("name")
  return self._name

  @property
@@ -154,16 +154,16 @@ class EbrainsDataset(EbrainsBaseDataset):
  {
  "url": f if f.startswith("http") else f"https://doi.org/{f}",
  }
- for f in self.detail.get("kgReference", [])
+ for f in self._detail.get("kgReference", [])
  ]

  @property
  def description(self) -> str:
- return self.detail.get("description")
+ return self._detail.get("description")

  @property
  def contributors(self) -> List[EbrainsDatasetPerson]:
- return self.detail.get("contributors")
+ return self._detail.get("contributors")

  @property
  def ebrains_page(self):
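Across these dataset classes the detail property is renamed to _detail, marking the lazily fetched EBRAINS metadata as private while the public properties keep reading from it. A generic sketch of that lazy-caching pattern, using a stand-in fetch instead of the actual knowledge-graph query:

    from typing import Optional


    class Dataset:
        def __init__(self, dataset_id: str):
            self._id = dataset_id
            self._cached_data: Optional[dict] = None

        @property
        def _detail(self) -> dict:
            if not self._cached_data:
                # stand-in for the EBRAINS knowledge-graph request keyed by the id suffix
                self._cached_data = {"name": f"dataset {self._id}", "description": ""}
            return self._cached_data

        @property
        def name(self) -> str:
            return self._detail.get("name")


    print(Dataset("abc-123").name)   # dataset abc-123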
@@ -171,16 +171,16 @@ class EbrainsDataset(EbrainsBaseDataset):

  @property
  def custodians(self) -> EbrainsDatasetPerson:
- return self.detail.get("custodians")
+ return self._detail.get("custodians")

  @property
  def LICENSE(self) -> str:
- return self.detail.get("license", "No license information is found.")
+ return self._detail.get("license", "No license information is found.")


  class EbrainsV3DatasetVersion(EbrainsBaseDataset):
  @staticmethod
- def parse_person(d: dict) -> EbrainsDatasetPerson:
+ def _parse_person(d: dict) -> EbrainsDatasetPerson:
  assert "https://openminds.ebrains.eu/core/Person" in d.get("type"), "Cannot convert a non person to a person dict!"
  _id = d.get("id")
  name = f"{d.get('givenName')} {d.get('familyName')}"
@@ -199,7 +199,7 @@ class EbrainsV3DatasetVersion(EbrainsBaseDataset):
  self._cached_data = cached_data

  @property
- def detail(self):
+ def _detail(self):
  if not self._cached_data:
  match = re.search(r"([a-f0-9-]+)$", self._id)
  instance_id = match.group(1)
@@ -219,30 +219,30 @@ class EbrainsV3DatasetVersion(EbrainsBaseDataset):

  @property
  def name(self) -> str:
- fullname = self.detail.get("fullName")
+ fullname = self._detail.get("fullName")
  if not fullname:
  for dataset in self.is_version_of:
  if fullname:
  break
  fullname = dataset.name
- version_id = self.detail.get("versionIdentifier")
+ version_id = self._detail.get("versionIdentifier")
  return f"{fullname} ({version_id})"

  @property
  def is_version_of(self):
  if not hasattr(self, "_is_version_of"):
- self._is_version_of = [EbrainsV3Dataset(id=id.get("id")) for id in self.detail.get("isVersionOf", [])]
+ self._is_version_of = [EbrainsV3Dataset(id=id.get("id")) for id in self._detail.get("isVersionOf", [])]
  return self._is_version_of

  @property
  def urls(self) -> List[EbrainsDatasetUrl]:
  return [{
  "url": doi.get("identifier", None)
- } for doi in self.detail.get("doi", [])]
+ } for doi in self._detail.get("doi", [])]

  @property
  def description(self) -> str:
- description = self.detail.get("description")
+ description = self._detail.get("description")
  for ds in self.is_version_of:
  if description:
  break
@@ -251,7 +251,7 @@ class EbrainsV3DatasetVersion(EbrainsBaseDataset):

  @property
  def contributors(self) -> List[EbrainsDatasetPerson]:
- return [EbrainsV3DatasetVersion.parse_person(d) for d in self.detail.get("author", [])]
+ return [EbrainsV3DatasetVersion._parse_person(d) for d in self._detail.get("author", [])]

  @property
  def ebrains_page(self) -> str:
@@ -261,19 +261,19 @@ class EbrainsV3DatasetVersion(EbrainsBaseDataset):

  @property
  def custodians(self) -> EbrainsDatasetPerson:
- return [EbrainsV3DatasetVersion.parse_person(d) for d in self.detail.get("custodian", [])]
+ return [EbrainsV3DatasetVersion._parse_person(d) for d in self._detail.get("custodian", [])]

  @property
- def version_changes(self):
- return self.detail.get("versionInnovation", "")
+ def version_changelog(self):
+ return self._detail.get("versionInnovation", "")

  @property
  def version_identifier(self):
- return self.detail.get("versionIdentifier", "")
+ return self._detail.get("versionIdentifier", "")

  @property
  def LICENSE(self) -> str:
- return self.detail.get("license", "No license information is found.")
+ return self._detail.get("license", "No license information is found.")


  class EbrainsV3Dataset(EbrainsBaseDataset):
@@ -290,16 +290,16 @@ class EbrainsV3Dataset(EbrainsBaseDataset):

  @property
  def name(self) -> str:
- return self.detail.get("fullName")
+ return self._detail.get("fullName")

  @property
  def urls(self) -> List[EbrainsDatasetUrl]:
  return [{
  "url": doi.get("identifier", None)
- } for doi in self.detail.get("doi", [])]
+ } for doi in self._detail.get("doi", [])]

  @property
- def detail(self):
+ def _detail(self):
  if not self._cached_data:
  match = re.search(r"([a-f0-9-]+)$", self._id)
  instance_id = match.group(1)
@@ -315,7 +315,7 @@ class EbrainsV3Dataset(EbrainsBaseDataset):

  @property
  def description(self) -> str:
- return self.detail.get("description", "")
+ return self._detail.get("description", "")

  @property
  def contributors(self):
@@ -336,15 +336,15 @@ class EbrainsV3Dataset(EbrainsBaseDataset):

  @property
  def custodians(self) -> EbrainsDatasetPerson:
- return [EbrainsV3DatasetVersion.parse_person(d) for d in self.detail.get("custodian", [])]
+ return [EbrainsV3DatasetVersion._parse_person(d) for d in self._detail.get("custodian", [])]

  @property
  def version_ids(self) -> List['str']:
- return [version.get("id") for version in self.detail.get("versions", [])]
+ return [version.get("id") for version in self._detail.get("versions", [])]

  @property
  def LICENSE(self) -> str:
- return self.detail.get("license", "No license information is found.")
+ return self._detail.get("license", "No license information is found.")


  class GenericDataset():