ONE-api 3.0b0.tar.gz → 3.0b3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {one_api-3.0b0 → one_api-3.0b3/ONE_api.egg-info}/PKG-INFO +2 -2
  2. {one_api-3.0b0 → one_api-3.0b3}/ONE_api.egg-info/requires.txt +1 -1
  3. {one_api-3.0b0/ONE_api.egg-info → one_api-3.0b3}/PKG-INFO +2 -2
  4. {one_api-3.0b0 → one_api-3.0b3}/one/__init__.py +1 -1
  5. {one_api-3.0b0 → one_api-3.0b3}/one/alf/__init__.py +1 -1
  6. {one_api-3.0b0 → one_api-3.0b3}/one/alf/cache.py +164 -17
  7. {one_api-3.0b0 → one_api-3.0b3}/one/alf/exceptions.py +15 -5
  8. {one_api-3.0b0 → one_api-3.0b3}/one/alf/io.py +44 -42
  9. {one_api-3.0b0 → one_api-3.0b3}/one/alf/path.py +58 -25
  10. {one_api-3.0b0 → one_api-3.0b3}/one/alf/spec.py +43 -20
  11. {one_api-3.0b0 → one_api-3.0b3}/one/api.py +178 -304
  12. {one_api-3.0b0 → one_api-3.0b3}/one/converters.py +43 -34
  13. {one_api-3.0b0 → one_api-3.0b3}/one/params.py +29 -19
  14. {one_api-3.0b0 → one_api-3.0b3}/one/registration.py +29 -28
  15. {one_api-3.0b0 → one_api-3.0b3}/one/remote/aws.py +21 -19
  16. {one_api-3.0b0 → one_api-3.0b3}/one/remote/base.py +9 -6
  17. {one_api-3.0b0 → one_api-3.0b3}/one/remote/globus.py +57 -55
  18. {one_api-3.0b0 → one_api-3.0b3}/one/util.py +36 -39
  19. {one_api-3.0b0 → one_api-3.0b3}/one/webclient.py +102 -70
  20. {one_api-3.0b0 → one_api-3.0b3}/pyproject.toml +11 -0
  21. {one_api-3.0b0 → one_api-3.0b3}/requirements.txt +1 -1
  22. {one_api-3.0b0 → one_api-3.0b3}/LICENSE +0 -0
  23. {one_api-3.0b0 → one_api-3.0b3}/MANIFEST.in +0 -0
  24. {one_api-3.0b0 → one_api-3.0b3}/ONE_api.egg-info/SOURCES.txt +0 -0
  25. {one_api-3.0b0 → one_api-3.0b3}/ONE_api.egg-info/dependency_links.txt +0 -0
  26. {one_api-3.0b0 → one_api-3.0b3}/ONE_api.egg-info/top_level.txt +0 -0
  27. {one_api-3.0b0 → one_api-3.0b3}/README.md +0 -0
  28. {one_api-3.0b0 → one_api-3.0b3}/one/remote/__init__.py +0 -0
  29. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/datasets.pqt +0 -0
  30. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/params/.caches +0 -0
  31. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/params/.test.alyx.internationalbrainlab.org +0 -0
  32. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/1f187d80fd59677b395fcdb18e68e4401bfa1cc9 +0 -0
  33. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/3f51aa2e0baa42438467906f56a457c91a221898 +0 -0
  34. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/47893cf67c985e6361cdee009334963f49fb0746 +0 -0
  35. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/535d0e9a1e2c1efbdeba0d673b131e00361a2edb +0 -0
  36. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/5618bea3484a52cd893616f07903f0e49e023ba1 +0 -0
  37. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/6dc96f7e9bcc6ac2e7581489b9580a6cd3f28293 +0 -0
  38. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/db1731fb8df0208944ae85f76718430813a8bf50 +0 -0
  39. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/dcce48259bb929661f60a02a48563f70aa6185b3 +0 -0
  40. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/rest_responses/f530d6022f61cdc9e38cc66beb3cb71f3003c9a1 +0 -0
  41. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/sessions.pqt +0 -0
  42. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/test_dbs.json +0 -0
  43. {one_api-3.0b0 → one_api-3.0b3}/one/tests/fixtures/test_img.png +0 -0
  44. {one_api-3.0b0 → one_api-3.0b3}/setup.cfg +0 -0

ONE_api.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ONE-api
-Version: 3.0b0
+Version: 3.0b3
 Summary: Open Neurophysiology Environment
 Author: IBL Staff
 License: MIT
@@ -12,7 +12,7 @@ Project-URL: Changelog, https://github.com/int-brain-lab/ONE/blob/main/CHANGELOG
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: flake8>=3.7.8
+Requires-Dist: ruff
 Requires-Dist: numpy>=1.18
 Requires-Dist: pandas>=1.5.0
 Requires-Dist: tqdm>=4.32.1

ONE_api.egg-info/requires.txt
@@ -1,4 +1,4 @@
-flake8>=3.7.8
+ruff
 numpy>=1.18
 pandas>=1.5.0
 tqdm>=4.32.1

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: ONE-api
-Version: 3.0b0
+Version: 3.0b3
 Summary: Open Neurophysiology Environment
 Author: IBL Staff
 License: MIT
@@ -12,7 +12,7 @@ Project-URL: Changelog, https://github.com/int-brain-lab/ONE/blob/main/CHANGELOG
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: flake8>=3.7.8
+Requires-Dist: ruff
 Requires-Dist: numpy>=1.18
 Requires-Dist: pandas>=1.5.0
 Requires-Dist: tqdm>=4.32.1

one/__init__.py
@@ -1,2 +1,2 @@
 """The Open Neurophysiology Environment (ONE) API."""
-__version__ = '3.0b0'
+__version__ = '3.0b3'

one/alf/__init__.py
@@ -1 +1 @@
-"""Constructing, parsing, validating and loading ALyx Files (ALF)"""
+"""Constructing, parsing, validating and loading ALyx Files (ALF)."""

one/alf/cache.py
@@ -9,6 +9,7 @@ Examples
 >>> cache_dir = 'path/to/data'
 >>> make_parquet_db(cache_dir)
 >>> one = One(cache_dir=cache_dir)
+
 """
 
 # -------------------------------------------------------------------------------------------------
@@ -25,6 +26,7 @@ import logging
 import pandas as pd
 import numpy as np
 from packaging import version
+from iblutil.util import Bunch
 from iblutil.io import parquet
 from iblutil.io.hashfile import md5
 
@@ -32,8 +34,9 @@ from one.alf.spec import QC, is_uuid_string
 from one.alf.io import iter_sessions
 from one.alf.path import session_path_parts, get_alf_path
 
-__all__ = ['make_parquet_db', 'patch_cache', 'remove_missing_datasets',
-           'remove_cache_table_files', 'EMPTY_DATASETS_FRAME', 'EMPTY_SESSIONS_FRAME', 'QC_TYPE']
+__all__ = [
+    'make_parquet_db', 'patch_tables', 'merge_tables', 'QC_TYPE', 'remove_table_files',
+    'remove_missing_datasets', 'load_tables', 'EMPTY_DATASETS_FRAME', 'EMPTY_SESSIONS_FRAME']
 _logger = logging.getLogger(__name__)
 
 # -------------------------------------------------------------------------------------------------
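
The reordered __all__ above doubles as a rename map: patch_cache becomes patch_tables, remove_cache_table_files becomes remove_table_files, and load_tables/merge_tables are new. A minimal, hypothetical import sketch for code targeting 3.0b3:

    # Sketch only: importing the public helpers under their 3.0b3 names.
    from one.alf.cache import (
        make_parquet_db,
        load_tables,               # new in 3.0b3
        merge_tables,              # new in 3.0b3
        patch_tables,              # formerly patch_cache
        remove_table_files,        # formerly remove_cache_table_files
        remove_missing_datasets,
    )
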
@@ -107,6 +110,7 @@ def _get_session_info(rel_ses_path):
         The task protocol (empty str).
     str
         The associated project (empty str).
+
     """
     lab, subject, s_date, num = session_path_parts(rel_ses_path, as_dict=False, assert_valid=True)
     eid = _ses_str_id(rel_ses_path)
@@ -142,6 +146,7 @@ def _get_dataset_info(dset_path, ses_eid=None, compute_hash=False):
         Whether the file exists.
     str
         The QC value for the dataset ('NOT_SET').
+
     """
     rel_dset_path = get_alf_path(dset_path.relative_to_session())
     ses_eid = ses_eid or _ses_str_id(dset_path.session_path())
@@ -188,13 +193,13 @@ def _ids_to_uuid(df_ses, df_dsets):
 # -------------------------------------------------------------------------------------------------
 
 def _metadata(origin):
-    """
-    Metadata dictionary for Parquet files.
+    """Metadata dictionary for Parquet files.
 
     Parameters
     ----------
     origin : str, pathlib.Path
         Path to full directory, or computer name / db name.
+
     """
     return {
         'date_created': datetime.datetime.now().isoformat(sep=' ', timespec='minutes'),
@@ -203,8 +208,7 @@
 
 
 def _make_sessions_df(root_dir) -> pd.DataFrame:
-    """
-    Given a root directory, recursively finds all sessions and returns a sessions DataFrame.
+    """Given a root directory, recursively finds all sessions and returns a sessions DataFrame.
 
     Parameters
     ----------
@@ -215,6 +219,7 @@ def _make_sessions_df(root_dir) -> pd.DataFrame:
     -------
     pandas.DataFrame
         A pandas DataFrame of session info.
+
     """
     rows = []
     for full_path in iter_sessions(root_dir):
@@ -229,8 +234,7 @@
 
 
 def _make_datasets_df(root_dir, hash_files=False) -> pd.DataFrame:
-    """
-    Given a root directory, recursively finds all datasets and returns a datasets DataFrame.
+    """Given a root directory, recursively finds all datasets and returns a datasets DataFrame.
 
     Parameters
     ----------
@@ -243,6 +247,7 @@ def _make_datasets_df(root_dir, hash_files=False) -> pd.DataFrame:
     -------
     pandas.DataFrame
         A pandas DataFrame of dataset info.
+
     """
     # Go through sessions and append datasets
     rows = []
@@ -255,8 +260,7 @@
 
 
 def make_parquet_db(root_dir, out_dir=None, hash_ids=True, hash_files=False, lab=None):
-    """
-    Given a data directory, index the ALF datasets and save the generated cache tables.
+    """Given a data directory, index the ALF datasets and save the generated cache tables.
 
     Parameters
     ----------
@@ -281,6 +285,7 @@ def make_parquet_db(root_dir, out_dir=None, hash_ids=True, hash_files=False, lab
         The full path of the saved sessions parquet table.
     pathlib.Path
         The full path of the saved datasets parquet table.
+
     """
     root_dir = Path(root_dir).resolve()
 
@@ -323,8 +328,7 @@
 
 
 def cast_index_object(df: pd.DataFrame, dtype: type = uuid.UUID) -> pd.Index:
-    """
-    Cast the index object to the specified dtype.
+    """Cast the index object to the specified dtype.
 
     NB: The data frame index will remain as 'object', however the underlying object type will be
     modified.
@@ -340,6 +344,7 @@ def cast_index_object(df: pd.DataFrame, dtype: type = uuid.UUID) -> pd.Index:
     -------
     pandas.DataFrame
         An updated data frame with a new index data type.
+
     """
     if isinstance(df.index, pd.MultiIndex):
         # df.index = df.index.map(lambda x: tuple(map(UUID, x)))
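
cast_index_object is called on each loaded table in the new load_tables function below, turning string UUID indices into uuid.UUID objects while the pandas dtype stays 'object'. A small sketch, assuming a frame indexed by a column of UUID strings named 'id':

    # Sketch only: cast a string-UUID index to uuid.UUID objects in place.
    import uuid
    import pandas as pd
    from one.alf.cache import cast_index_object

    df = pd.DataFrame({'exists': [True]}, index=pd.Index([str(uuid.uuid4())], name='id'))
    df = cast_index_object(df, uuid.UUID)  # index values are now uuid.UUID instances
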
@@ -353,9 +358,148 @@ def cast_index_object(df: pd.DataFrame, dtype: type = uuid.UUID) -> pd.Index:
     return df
 
 
-def remove_missing_datasets(cache_dir, tables=None, remove_empty_sessions=True, dry=True):
+def load_tables(tables_dir, glob_pattern='*.pqt'):
+    """Load parquet cache files from a local directory.
+
+    Parameters
+    ----------
+    tables_dir : str, pathlib.Path
+        The directory location of the parquet files.
+    glob_pattern : str
+        A glob pattern to match the cache files.
+
+
+    Returns
+    -------
+    Bunch
+        A Bunch object containing the loaded cache tables and associated metadata.
+
     """
-    Remove dataset files and session folders that are not in the provided cache.
+    meta = {
+        'expired': False,
+        'created_time': None,
+        'loaded_time': None,
+        'modified_time': None,
+        'saved_time': None,
+        'raw': {}
+    }
+    caches = Bunch({
+        'datasets': EMPTY_DATASETS_FRAME.copy(),
+        'sessions': EMPTY_SESSIONS_FRAME.copy(),
+        '_meta': meta})
+    INDEX_KEY = '.?id'
+    for cache_file in Path(tables_dir).glob(glob_pattern):
+        table = cache_file.stem
+        # we need to keep this part fast enough for transient objects
+        cache, meta['raw'][table] = parquet.load(cache_file)
+        if 'date_created' not in meta['raw'][table]:
+            _logger.warning(f"{cache_file} does not appear to be a valid table. Skipping")
+            continue
+        meta['loaded_time'] = datetime.datetime.now()
+
+        # Set the appropriate index if none already set
+        if isinstance(cache.index, pd.RangeIndex):
+            idx_columns = sorted(cache.filter(regex=INDEX_KEY).columns)
+            if len(idx_columns) == 0:
+                raise KeyError('Failed to set index')
+            cache.set_index(idx_columns, inplace=True)
+
+        # Patch older tables
+        cache = patch_tables(cache, meta['raw'][table].get('min_api_version'), table)
+
+        # Cast indices to UUID
+        cache = cast_index_object(cache, uuid.UUID)
+
+        # Check sorted
+        # Sorting makes MultiIndex indexing O(N) -> O(1)
+        if not cache.index.is_monotonic_increasing:
+            cache.sort_index(inplace=True)
+
+        caches[table] = cache
+
+    created = [datetime.datetime.fromisoformat(x['date_created'])
+               for x in meta['raw'].values() if 'date_created' in x]
+    if created:
+        meta['created_time'] = min(created)
+    return caches
+
+
+def merge_tables(cache, strict=False, **kwargs):
+    """Update the cache tables with new records.
+
+    Parameters
+    ----------
+    dict
+        A map of cache tables to update.
+    strict : bool
+        If not True, the columns don't need to match. Extra columns in input tables are
+        dropped and missing columns are added and filled with np.nan.
+    kwargs
+        pandas.DataFrame or pandas.Series to insert/update for each table.
+
+    Returns
+    -------
+    datetime.datetime:
+        A timestamp of when the cache was updated.
+
+    Example
+    -------
+    >>> session, datasets = ses2records(self.get_details(eid, full=True))
+    ... self._update_cache_from_records(sessions=session, datasets=datasets)
+
+    Raises
+    ------
+    AssertionError
+        When strict is True the input columns must exactly match those oo the cache table,
+        including the order.
+    KeyError
+        One or more of the keyword arguments does not match a table in cache.
+
+    """
+    updated = None
+    for table, records in kwargs.items():
+        if records is None or records.empty:
+            continue
+        if table not in cache:
+            raise KeyError(f'Table "{table}" not in cache')
+        if isinstance(records, pd.Series):
+            records = pd.DataFrame([records])
+            records.index.set_names(cache[table].index.names, inplace=True)
+        # Drop duplicate indices
+        records = records[~records.index.duplicated(keep='first')]
+        if not strict:
+            # Deal with case where there are extra columns in the cache
+            extra_columns = list(set(cache[table].columns) - set(records.columns))
+            # Convert these columns to nullable, if required
+            cache_columns = cache[table][extra_columns]
+            cache[table][extra_columns] = cache_columns.convert_dtypes()
+            column_ids = map(list(cache[table].columns).index, extra_columns)
+            for col, n in sorted(zip(extra_columns, column_ids), key=lambda x: x[1]):
+                dtype = cache[table][col].dtype
+                nan = getattr(dtype, 'na_value', np.nan)
+                val = records.get('exists', True) if col.startswith('exists_') else nan
+                records.insert(n, col, val)
+            # Drop any extra columns in the records that aren't in cache table
+            to_drop = set(records.columns) - set(cache[table].columns)
+            records = records.drop(to_drop, axis=1)
+            records = records.reindex(columns=cache[table].columns)
+        assert set(cache[table].columns) == set(records.columns)
+        records = records.astype(cache[table].dtypes)
+        # Update existing rows
+        to_update = records.index.isin(cache[table].index)
+        cache[table].loc[records.index[to_update], :] = records[to_update]
+        # Assign new rows
+        to_assign = records[~to_update]
+        frames = [cache[table], to_assign]
+        # Concatenate and sort
+        cache[table] = pd.concat(frames).sort_index()
+        updated = datetime.datetime.now()
+    cache['_meta']['modified_time'] = updated
+    return updated
+
+
+def remove_missing_datasets(cache_dir, tables=None, remove_empty_sessions=True, dry=True):
+    """Remove dataset files and session folders that are not in the provided cache.
 
     NB: This *does not* remove entries from the cache tables that are missing on disk.
     Non-ALF files are not removed. Empty sessions that exist in the sessions table are not removed.
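
Taken together, load_tables and merge_tables cover reading the parquet caches from disk and folding new records into them. A minimal usage sketch based on the signatures above; the directory and the records frame are placeholders:

    # Sketch only: load the cache tables, then merge an updated datasets frame.
    import pandas as pd
    from one.alf.cache import load_tables, merge_tables

    cache = load_tables('path/to/data')    # Bunch with 'sessions', 'datasets' and '_meta'
    print(cache['_meta']['created_time'])  # earliest 'date_created' found in the parquet metadata

    new_datasets = pd.DataFrame()  # placeholder; real records share the datasets table's index and columns
    modified = merge_tables(cache, datasets=new_datasets)  # timestamp, or None if nothing was merged
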
@@ -374,13 +518,14 @@ def remove_missing_datasets(cache_dir, tables=None, remove_empty_sessions=True,
     -------
     list
         A sorted list of paths to be removed.
+
     """
     cache_dir = Path(cache_dir)
     if tables is None:
         tables = {}
         for name in ('datasets', 'sessions'):
             table, m = parquet.load(cache_dir / f'{name}.pqt')
-            tables[name] = patch_cache(table, m.get('min_api_version'), name)
+            tables[name] = patch_tables(table, m.get('min_api_version'), name)
 
     INDEX_KEY = '.?id'
     for name in tables:
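
remove_missing_datasets keeps its signature; only the internal patch_cache call is renamed. Per the docstring, the default dry=True only reports what would be deleted, so a preview sketch looks like this (the cache directory is a placeholder):

    # Sketch only: list dataset files and session folders on disk that are not in the cache tables.
    from one.alf.cache import remove_missing_datasets

    to_delete = remove_missing_datasets('path/to/data', dry=True)  # nothing is removed in dry mode
    print(*to_delete, sep='\n')
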
@@ -429,7 +574,7 @@ def remove_missing_datasets(cache_dir, tables=None, remove_empty_sessions=True,
     return sorted(to_delete)
 
 
-def remove_cache_table_files(folder, tables=('sessions', 'datasets')):
+def remove_table_files(folder, tables=('sessions', 'datasets')):
     """Delete cache tables on disk.
 
     Parameters
@@ -444,6 +589,7 @@ def remove_cache_table_files(folder, tables=('sessions', 'datasets')):
     -------
     list of pathlib.Path
         A list of the removed files.
+
     """
     filenames = ('cache_info.json', *(f'{t}.pqt' for t in tables))
     removed = []
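
For the renamed remove_table_files, the body above shows which files are targeted, so a one-call sketch suffices (the folder is a placeholder):

    # Sketch only: delete cache_info.json plus sessions.pqt and datasets.pqt from a cache folder.
    from pathlib import Path
    from one.alf.cache import remove_table_files

    removed = remove_table_files(Path('path/to/data'))  # returns a list of the removed files
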
@@ -478,7 +624,7 @@ def _cache_int2str(table: pd.DataFrame) -> pd.DataFrame:
     return table
 
 
-def patch_cache(table: pd.DataFrame, min_api_version=None, name=None) -> pd.DataFrame:
+def patch_tables(table: pd.DataFrame, min_api_version=None, name=None) -> pd.DataFrame:
     """Reformat older cache tables to comply with this version of ONE.
 
     Currently this function will 1. convert integer UUIDs to string UUIDs; 2. rename the 'project'
@@ -492,6 +638,7 @@ def patch_cache(table: pd.DataFrame, min_api_version=None, name=None) -> pd.Data
         The minimum API version supported by this cache table.
     name : {'dataset', 'session'} str
         The name of the table.
+
     """
     min_version = version.parse(min_api_version or '0.0.0')
     table = _cache_int2str(table)
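
patch_tables keeps the patch_cache signature, and remove_missing_datasets above already demonstrates the intended call pattern; a standalone sketch of the same idiom:

    # Sketch only: load a raw parquet table and upgrade it to the current cache format.
    from iblutil.io import parquet
    from one.alf.cache import patch_tables

    table, meta = parquet.load('path/to/data/datasets.pqt')
    table = patch_tables(table, meta.get('min_api_version'), 'datasets')
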

one/alf/exceptions.py
@@ -13,7 +13,9 @@ class ALFError(Exception):
     explanation : str
         An optional, verbose but general explanation of the error class. All errors will display
         the same explanation.
+
     """
+
     explanation = ''
 
     def __init__(self, *args, terse=False):
@@ -33,6 +35,7 @@ class ALFError(Exception):
 
         >>> raise ALFError('invalid/path/one', 'invalid/path/two')
         one.alf.exceptions.ALFError: "invalid/path/one", "invalid/path/two"
+
         """
         if args:
             if len(args) == 1 and isinstance(args[0], str):
@@ -50,19 +53,22 @@
 
 
 class AlyxSubjectNotFound(ALFError):
-    """'Subject not found' error"""
+    """'Subject not found' error."""
+
     explanation = 'The subject was not found in Alyx database'
 
 
 class ALFObjectNotFound(ALFError):
-    """'Object not found' error"""
+    """'Object not found' error."""
+
     explanation = ('The ALF object was not found. This may occur if the object or namespace or '
                    'incorrectly formatted e.g. the object "_ibl_trials.intervals.npy" would be '
                    'found with the filters `object="trials", namespace="ibl"`')
 
 
 class ALFMultipleObjectsFound(ALFError):
-    """'Multiple objects found' error"""
+    """'Multiple objects found' error."""
+
     explanation = ('Dataset files belonging to more than one object found. '
                    'ALF names have the pattern '
                    '(_namespace_)object.attribute(_timescale).extension, e.g. for the file '
@@ -70,7 +76,8 @@ class ALFMultipleObjectsFound(ALFError):
 
 
 class ALFMultipleCollectionsFound(ALFError):
-    """'Multiple collections found' error"""
+    """'Multiple collections found' error."""
+
     explanation = ('The matching object/file(s) belong to more than one collection. '
                    'ALF names have the pattern '
                    'collection/(_namespace_)object.attribute(_timescale).extension, e.g. for the '
@@ -78,7 +85,8 @@ class ALFMultipleCollectionsFound(ALFError):
 
 
 class ALFMultipleRevisionsFound(ALFError):
-    """'Multiple objects found' error"""
+    """'Multiple objects found' error."""
+
     explanation = ('The matching object/file(s) belong to more than one revision. '
                    'Multiple datasets in different revision folders were found with no default '
                    'specified.')
@@ -86,10 +94,12 @@ class ALFMultipleRevisionsFound(ALFError):
 
 class ALFWarning(Warning):
     """Cautions when loading ALF datasets."""
+
     pass
 
 
 class ALFInvalid(ALFError, ValueError):
     """ALF path invalid."""
+
     explanation = ('The file path provided is does not match the ALF path specification defined '
                    'in `one.alf.spec`.')
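
The exceptions module changes are docstring formatting only (trailing periods and blank lines after class docstrings), so existing error handling keeps working; a small sketch, with the raise standing in for a failed dataset lookup:

    # Sketch only: ALFObjectNotFound still derives from ALFError, so broad handlers are unaffected.
    from one.alf.exceptions import ALFError, ALFObjectNotFound

    try:
        raise ALFObjectNotFound('trials')  # placeholder for a lookup that finds no matching object
    except ALFError as ex:
        print(ex)  # the message incorporates the class-level explanation unless terse=True was passed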