ONE-api 3.0b5__py3-none-any.whl → 3.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
one/__init__.py CHANGED
@@ -1,2 +1,2 @@
  """The Open Neurophysiology Environment (ONE) API."""
- __version__ = '3.0b5'
+ __version__ = '3.1.0'
one/alf/cache.py CHANGED
@@ -26,7 +26,7 @@ import logging
  import pandas as pd
  import numpy as np
  from packaging import version
- from iblutil.util import Bunch
+ from iblutil.util import Bunch, ensure_list
  from iblutil.io import parquet
  from iblutil.io.hashfile import md5
@@ -35,8 +35,9 @@ from one.alf.io import iter_sessions
  from one.alf.path import session_path_parts, get_alf_path

  __all__ = [
- 'make_parquet_db', 'patch_tables', 'merge_tables', 'QC_TYPE', 'remove_table_files',
- 'remove_missing_datasets', 'load_tables', 'EMPTY_DATASETS_FRAME', 'EMPTY_SESSIONS_FRAME']
+ 'make_parquet_db', 'load_tables', 'patch_tables', 'merge_tables',
+ 'remove_table_files', 'remove_missing_datasets', 'default_cache',
+ 'QC_TYPE', 'EMPTY_DATASETS_FRAME', 'EMPTY_SESSIONS_FRAME']
  _logger = logging.getLogger(__name__)

  # -------------------------------------------------------------------------------------------------
@@ -259,6 +260,33 @@ def _make_datasets_df(root_dir, hash_files=False) -> pd.DataFrame:
  return pd.DataFrame(rows, columns=DATASETS_COLUMNS).astype(DATASETS_COLUMNS)


+ def default_cache(origin=''):
+ """Returns an empty cache dictionary with the default tables.
+
+ Parameters
+ ----------
+ origin : str, optional
+ The origin of the cache (e.g. a computer name or database name).
+
+ Returns
+ -------
+ Bunch
+ A Bunch object containing the loaded cache tables and associated metadata.
+
+ """
+ table_meta = _metadata(origin)
+ return Bunch({
+ 'datasets': EMPTY_DATASETS_FRAME.copy(),
+ 'sessions': EMPTY_SESSIONS_FRAME.copy(),
+ '_meta': {
+ 'created_time': None,
+ 'loaded_time': None,
+ 'modified_time': None,
+ 'saved_time': None,
+ 'raw': {k: table_meta.copy() for k in ('datasets', 'sessions')}}
+ })
+
+
  def make_parquet_db(root_dir, out_dir=None, hash_ids=True, hash_files=False, lab=None):
  """Given a data directory, index the ALF datasets and save the generated cache tables.
@@ -375,17 +403,8 @@ def load_tables(tables_dir, glob_pattern='*.pqt'):
  A Bunch object containing the loaded cache tables and associated metadata.

  """
- meta = {
- 'created_time': None,
- 'loaded_time': None,
- 'modified_time': None,
- 'saved_time': None,
- 'raw': {}
- }
- caches = Bunch({
- 'datasets': EMPTY_DATASETS_FRAME.copy(),
- 'sessions': EMPTY_SESSIONS_FRAME.copy(),
- '_meta': meta})
+ caches = default_cache()
+ meta = caches['_meta']
  INDEX_KEY = '.?id'
  for cache_file in Path(tables_dir).glob(glob_pattern):
  table = cache_file.stem
@@ -425,9 +444,12 @@ def load_tables(tables_dir, glob_pattern='*.pqt'):
  return caches


- def merge_tables(cache, strict=False, **kwargs):
+ def merge_tables(cache, strict=False, origin=None, **kwargs):
  """Update the cache tables with new records.

+ Note: A copy of the tables in cache may be returned if the original tables are immutable.
+ This can happen when tables are loaded from a parquet file.
+
  Parameters
  ----------
  dict
@@ -435,6 +457,8 @@ def merge_tables(cache, strict=False, **kwargs):
  strict : bool
  If not True, the columns don't need to match. Extra columns in input tables are
  dropped and missing columns are added and filled with np.nan.
+ origin : str
+ The origin of the cache (e.g. a computer name or database name).
  kwargs
  pandas.DataFrame or pandas.Series to insert/update for each table.
@@ -488,13 +512,31 @@ def merge_tables(cache, strict=False, **kwargs):
  records = records.astype(cache[table].dtypes)
  # Update existing rows
  to_update = records.index.isin(cache[table].index)
- cache[table].loc[records.index[to_update], :] = records[to_update]
+ try:
+ cache[table].loc[records.index[to_update], :] = records[to_update]
+ except ValueError as e:
+ if 'assignment destination is read-only' in str(e):
+ # NB: nullable integer and categorical dtypes may be backed by immutable arrays
+ # after loading from parquet and therefore must be copied before assignment
+ cache[table] = cache[table].copy()
+ cache[table].loc[records.index[to_update], :] = records[to_update]
+ else:
+ raise e # pragma: no cover
+
  # Assign new rows
  to_assign = records[~to_update]
  frames = [cache[table], to_assign]
  # Concatenate and sort
  cache[table] = pd.concat(frames).sort_index()
  updated = datetime.datetime.now()
+ # Update the table metadata with the origin
+ if origin is not None:
+ table_meta = cache['_meta']['raw'].get(table, {})
+ if not table_meta.get('origin'):
+ table_meta['origin'] = origin
+ else:
+ table_meta['origin'] = set((*ensure_list(table_meta['origin']), origin))
+ cache['_meta']['raw'][table] = table_meta
  cache['_meta']['modified_time'] = updated
  return updated
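For orientation, the new `default_cache` helper and the `origin` argument to `merge_tables` shown above could be exercised roughly as follows. This is a minimal sketch: `sessions_df` is a hypothetical sessions DataFrame built elsewhere and is not part of this diff.

>>> from one.alf.cache import default_cache, merge_tables
>>> cache = default_cache(origin='my-lab-server')  # empty datasets/sessions tables plus '_meta'
>>> modified_time = merge_tables(cache, sessions=sessions_df, origin='my-lab-server')
>>> cache['_meta']['raw']['sessions'].get('origin')  # the origin is recorded per table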
one/api.py CHANGED
@@ -6,6 +6,7 @@ import logging
  from weakref import WeakMethod
  from datetime import datetime, timedelta
  from functools import lru_cache, partial
+ from itertools import chain
  from inspect import unwrap
  from pathlib import Path, PurePosixPath
  from typing import Any, Union, Optional, List
@@ -31,7 +32,7 @@ import one.alf.exceptions as alferr
  from one.alf.path import ALFPath
  from .alf.cache import (
  make_parquet_db, load_tables, remove_table_files, merge_tables,
- EMPTY_DATASETS_FRAME, EMPTY_SESSIONS_FRAME, cast_index_object)
+ default_cache, cast_index_object)
  from .alf.spec import is_uuid, is_uuid_string, QC, to_alf
  from . import __version__
  from one.converters import ConversionMixin, session_record2path, ses2records, datasets2records
@@ -49,7 +50,7 @@ class One(ConversionMixin):
  """An API for searching and loading data on a local filesystem."""

  _search_terms = (
- 'dataset', 'date_range', 'laboratory', 'number',
+ 'datasets', 'date_range', 'laboratory', 'number',
  'projects', 'subject', 'task_protocol', 'dataset_qc_lte'
  )
@@ -113,16 +114,7 @@ class One(ConversionMixin):
  def _reset_cache(self):
  """Replace the cache object with a Bunch that contains the right fields."""
- self._cache = Bunch({
- 'datasets': EMPTY_DATASETS_FRAME.copy(),
- 'sessions': EMPTY_SESSIONS_FRAME.copy(),
- '_meta': {
- 'created_time': None,
- 'loaded_time': None,
- 'modified_time': None,
- 'saved_time': None,
- 'raw': {}} # map of original table metadata
- })
+ self._cache = default_cache()

  def _remove_table_files(self, tables=None):
  """Delete cache tables on disk.
@@ -216,6 +208,14 @@ class One(ConversionMixin):
  caches = load_tables(save_dir)
  merge_tables(
  caches, **{k: v for k, v in self._cache.items() if not k.startswith('_')})
+ # Ensure we use the minimum created date for each table
+ for table in caches['_meta']['raw']:
+ raw_meta = [x['_meta']['raw'].get(table, {}) for x in (caches, self._cache)]
+ created = filter(None, (x.get('date_created') for x in raw_meta))
+ if any(created := list(created)):
+ created = min(map(datetime.fromisoformat, created))
+ created = created.isoformat(sep=' ', timespec='minutes')
+ meta['raw'][table]['date_created'] = created

  with FileLock(save_dir, log=_logger, timeout=TIMEOUT, timeout_action='delete'):
  _logger.info('Saving cache tables...')
@@ -319,10 +319,8 @@ class One(ConversionMixin):
  Parameters
  ----------
- dataset : str, list
- One or more dataset names. Returns sessions containing all these datasets.
- A dataset matches if it contains the search string e.g. 'wheel.position' matches
- '_ibl_wheel.position.npy'.
+ datasets : str, list
+ One or more (exact) dataset names. Returns sessions containing all of these datasets.
  dataset_qc_lte : str, int, one.alf.spec.QC
  A dataset QC value, returns sessions with datasets at or below this QC value, including
  those with no QC set. If `dataset` not passed, sessions with any passing QC datasets
@@ -370,7 +368,9 @@ class One(ConversionMixin):
  Search for sessions on a given date, in a given lab, containing trials and spike data.

- >>> eids = one.search(date='2023-01-01', lab='churchlandlab', dataset=['trials', 'spikes'])
+ >>> eids = one.search(
+ ... date='2023-01-01', lab='churchlandlab',
+ ... datasets=['trials.table.pqt', 'spikes.times.npy'])

  Search for sessions containing trials and spike data where QC for both are WARNING or less.
@@ -397,13 +397,14 @@ class One(ConversionMixin):
  def all_present(x, dsets, exists=True):
  """Returns true if all datasets present in Series."""
- return all(any(x.str.contains(y, regex=self.wildcards) & exists) for y in dsets)
+ name = x.str.rsplit('/', n=1, expand=True).iloc[:, -1]
+ return all(any(name.str.fullmatch(y) & exists) for y in dsets)

  # Iterate over search filters, reducing the sessions table
  sessions = self._cache['sessions']

  # Ensure sessions filtered in a particular order, with datasets last
- search_order = ('date_range', 'number', 'dataset')
+ search_order = ('date_range', 'number', 'datasets')

  def sort_fcn(itm):
  return -1 if itm[0] not in search_order else search_order.index(itm[0])
@@ -430,12 +431,15 @@ class One(ConversionMixin):
  query = ensure_list(value)
  sessions = sessions[sessions[key].isin(map(int, query))]
  # Dataset/QC check is biggest so this should be done last
- elif key == 'dataset' or (key == 'dataset_qc_lte' and 'dataset' not in queries):
+ elif key == 'datasets' or (key == 'dataset_qc_lte' and 'datasets' not in queries):
  datasets = self._cache['datasets']
  qc = QC.validate(queries.get('dataset_qc_lte', 'FAIL')).name # validate value
  has_dset = sessions.index.isin(datasets.index.get_level_values('eid'))
+ if not has_dset.any():
+ sessions = sessions.iloc[0:0] # No datasets for any sessions
+ continue
  datasets = datasets.loc[(sessions.index.values[has_dset], ), :]
- query = ensure_list(value if key == 'dataset' else '')
+ query = ensure_list(value if key == 'datasets' else '')
  # For each session check any dataset both contains query and exists
  mask = (
  (datasets
@@ -1025,7 +1029,7 @@ class One(ConversionMixin):
  """
  query_type = query_type or self.mode
  datasets = self.list_datasets(
- eid, details=True, query_type=query_type, keep_eid_index=True, revision=revision)
+ eid, details=True, query_type=query_type, keep_eid_index=True)

  if len(datasets) == 0:
  raise alferr.ALFObjectNotFound(obj)
@@ -1711,7 +1715,13 @@ class OneAlyx(One):
  remote_created = datetime.fromisoformat(cache_info['date_created'])
  local_created = cache_meta.get('created_time', None)
  fresh = local_created and (remote_created - local_created) < timedelta(minutes=1)
- if fresh and not different_tag:
+ # The local cache may have been created locally more recently, but if it doesn't
+ # contain the same tag or origin, we need to download the remote one.
+ origin = cache_info.get('origin', 'unknown')
+ local_origin = (x.get('origin', []) for x in raw_meta)
+ local_origin = set(chain.from_iterable(map(ensure_list, local_origin)))
+ different_origin = origin not in local_origin
+ if fresh and not (different_tag or different_origin):
  _logger.info('No newer cache available')
  return cache_meta['loaded_time']
@@ -1725,19 +1735,27 @@ class OneAlyx(One):
  self._tables_dir = self._tables_dir or self.cache_dir

  # Check if the origin has changed. This is to warn users if downloading from a
- # different database to the one currently loaded.
- prev_origin = list(set(filter(None, (x.get('origin') for x in raw_meta))))
- origin = cache_info.get('origin', 'unknown')
- if prev_origin and origin not in prev_origin:
+ # different database to the one currently loaded. When building the cache from
+ # remote queries the origin is set to the Alyx database URL. If the cache info
+ # origin name and URL are different, warn the user.
+ if different_origin and local_origin and self.alyx.base_url not in local_origin:
  warnings.warn(
  'Downloading cache tables from another origin '
- f'("{origin}" instead of "{", ".join(prev_origin)}")')
+ f'("{origin}" instead of "{", ".join(local_origin)}")')

  # Download the remote cache files
  _logger.info('Downloading remote caches...')
  files = self.alyx.download_cache_tables(cache_info.get('location'), self._tables_dir)
  assert any(files)
- return super(OneAlyx, self).load_cache(self._tables_dir) # Reload cache after download
+ # Reload cache after download
+ loaded_time = super(OneAlyx, self).load_cache(self._tables_dir)
+ # Add db URL to origin set so we know where the cache came from
+ for raw_meta in self._cache['_meta']['raw'].values():
+ table_origin = set(filter(None, ensure_list(raw_meta.get('origin', []))))
+ if origin in table_origin:
+ table_origin.add(self.alyx.base_url)
+ raw_meta['origin'] = table_origin
+ return loaded_time
  except (requests.exceptions.HTTPError, wc.HTTPError, requests.exceptions.SSLError) as ex:
  _logger.debug(ex)
  _logger.error(f'{type(ex).__name__}: Failed to load the remote cache file')
@@ -1847,7 +1865,8 @@ class OneAlyx(One):
  return self._cache['datasets'].iloc[0:0] if details else [] # Return empty
  session, datasets = ses2records(self.alyx.rest('sessions', 'read', id=eid))
  # Add to cache tables
- merge_tables(self._cache, sessions=session, datasets=datasets.copy())
+ merge_tables(
+ self._cache, sessions=session, datasets=datasets.copy(), origin=self.alyx.base_url)
  if datasets is None or datasets.empty:
  return self._cache['datasets'].iloc[0:0] if details else [] # Return empty
  assert set(datasets.index.unique('eid')) == {eid}
@@ -1999,7 +2018,7 @@ class OneAlyx(One):
  rec = self.alyx.rest('insertions', 'read', id=str(pid))
  return UUID(rec['session']), rec['name']

- def eid2pid(self, eid, query_type=None, details=False):
+ def eid2pid(self, eid, query_type=None, details=False, **kwargs) -> (UUID, str, list):
  """Given an experiment UUID (eID), return the probe IDs and labels (i.e. ALF collection).

  NB: Requires a connection to the Alyx database.
@@ -2013,6 +2032,8 @@ class OneAlyx(One):
  Query mode - options include 'remote', and 'refresh'.
  details : bool
  Additionally return the complete Alyx records from insertions endpoint.
+ kwargs
+ Additional parameters to filter insertions Alyx endpoint.

  Returns
  -------
@@ -2023,6 +2044,15 @@ class OneAlyx(One):
  list of dict (optional)
  If details is true, returns the Alyx records from insertions endpoint.

+ Examples
+ --------
+ Get the probe IDs and details for a given session ID
+
+ >>> pids, labels, recs = one.eid2pid(eid, details=True)
+
+ Get the probe ID for a given session ID and label
+
+ >>> (pid,), _ = one.eid2pid(eid, details=False, name='probe00')
  """
  query_type = query_type or self.mode
  if query_type == 'local' and 'insertions' not in self._cache.keys():
@@ -2030,7 +2060,7 @@ class OneAlyx(One):
  eid = self.to_eid(eid) # Ensure we have a UUID str
  if not eid:
  return (None,) * (3 if details else 2)
- recs = self.alyx.rest('insertions', 'list', session=eid)
+ recs = self.alyx.rest('insertions', 'list', session=eid, **kwargs)
  pids = [UUID(x['id']) for x in recs]
  labels = [x['name'] for x in recs]
  if details:
@@ -2173,7 +2203,8 @@ class OneAlyx(One):
  # Build sessions table
  session_records = (x['session_info'] for x in insertions_records)
  sessions_df = pd.DataFrame(next(zip(*map(ses2records, session_records))))
- return merge_tables(self._cache, insertions=df, sessions=sessions_df)
+ return merge_tables(
+ self._cache, insertions=df, sessions=sessions_df, origin=self.alyx.base_url)

  def search(self, details=False, query_type=None, **kwargs):
  """Searches sessions matching the given criteria and returns a list of matching eids.
@@ -2192,10 +2223,8 @@ class OneAlyx(One):
  Parameters
  ----------
- dataset : str
- A (partial) dataset name. Returns sessions containing matching datasets.
- A dataset matches if it contains the search string e.g. 'wheel.position' matches
- '_ibl_wheel.position.npy'. C.f. `datasets` argument.
+ datasets : str, list
+ One or more (exact) dataset names. Returns sessions containing all of these datasets.
  date_range : str, list, datetime.datetime, datetime.date, pandas.timestamp
  A single date to search or a list of 2 dates that define the range (inclusive). To
  define only the upper or lower date bound, set the other element to None.
@@ -2222,11 +2251,12 @@ class OneAlyx(One):
  A str or list of lab location (as per Alyx definition) name.
  Note: this corresponds to the specific rig, not the lab geographical location per se.
  dataset_types : str, list
- One or more of dataset_types.
- datasets : str, list
- One or more (exact) dataset names. Returns sessions containing all of these datasets.
+ One or more of dataset_types. Unlike with `datasets`, the dataset types for the
+ sessions returned may not be reachable (i.e. for recent sessions the datasets may not
+ yet be available).
  dataset_qc_lte : int, str, one.alf.spec.QC
- The maximum QC value for associated datasets.
+ The maximum QC value for associated datasets. NB: Without `datasets`, not all
+ associated datasets with the matching QC values are guaranteed to be reachable.
  details : bool
  If true also returns a dict of dataset details.
  query_type : str, None
@@ -2271,6 +2301,9 @@ class OneAlyx(One):
  - In default and local mode, when the one.wildcards flag is True (default), queries are
  interpreted as regular expressions. To turn this off set one.wildcards to False.
  - In remote mode regular expressions are only supported using the `django` argument.
+ - In remote mode, only the `datasets` argument returns sessions where datasets are
+ registered *and* exist. Using `dataset_types` or `dataset_qc_lte` without `datasets`
+ will not check that the datasets are reachable.

  """
  query_type = query_type or self.mode
@@ -2340,7 +2373,7 @@ class OneAlyx(One):
  """
  df = pd.DataFrame(next(zip(*map(ses2records, session_records))))
- return merge_tables(self._cache, sessions=df)
+ return merge_tables(self._cache, sessions=df, origin=self.alyx.base_url)

  def _download_datasets(self, dsets, **kwargs) -> List[ALFPath]:
  """Download a single or multitude of datasets if stored on AWS.
one/remote/globus.py CHANGED
@@ -1211,7 +1211,7 @@ class Globus(DownloadClient):
  async def task_wait_async(self, task_id, polling_interval=10, timeout=10):
  """Asynchronously wait until a Task is complete or fails, with a time limit.

- If the task status is ACTIVE after timout, returns False, otherwise returns True.
+ If the task status is ACTIVE after timeout, returns False, otherwise returns True.

  Parameters
  ----------
one/webclient.py CHANGED
@@ -1155,7 +1155,8 @@ class AlyxClient:
  assert endpoint_scheme[action]['action'] == 'get'
  # add to url data if it is a string
  if id:
- # this is a special case of the list where we query a uuid. Usually read is better
+ # this is a special case of the list where we query a uuid
+ # usually read is better but list may return fewer data and therefore be faster
  if 'django' in kwargs.keys():
  kwargs['django'] = kwargs['django'] + ','
  else:
@@ -1163,6 +1164,9 @@ class AlyxClient:
  kwargs['django'] = f"{kwargs['django']}pk,{id}"
  # otherwise, look for a dictionary of filter terms
  if kwargs:
+ # if django arg is present but is None, server will return a cryptic 500 status
+ if 'django' in kwargs and kwargs['django'] is None:
+ del kwargs['django']
  # Convert all lists in query params to comma separated list
  query_params = {k: ','.join(map(str, ensure_list(v))) for k, v in kwargs.items()}
  url = update_url_params(url, query_params)
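The `AlyxClient.rest` guard above drops a `django` filter that is explicitly `None` before the query string is built, avoiding an opaque 500 response from the server. In effect, a call like the following sketch (assuming `alyx` is a configured `AlyxClient`; the subject name is hypothetical) behaves as if the argument had been omitted:

>>> sessions = alyx.rest('sessions', 'list', subject='SWC_043', django=None)  # None filter silently removed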
ONE_api-3.0b5.dist-info/METADATA → one_api-3.1.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: ONE-api
- Version: 3.0b5
+ Version: 3.1.0
  Summary: Open Neurophysiology Environment
  Author: IBL Staff
  License: MIT
@@ -21,6 +21,7 @@ Requires-Dist: iblutil>=1.14.0
  Requires-Dist: packaging
  Requires-Dist: boto3
  Requires-Dist: pyyaml
+ Dynamic: license-file

  # Open Neurophysiology Environment
  [![Coverage Status](https://coveralls.io/repos/github/int-brain-lab/ONE/badge.svg?branch=main)](https://coveralls.io/github/int-brain-lab/ONE?branch=main)
ONE_api-3.0b5.dist-info/RECORD → one_api-3.1.0.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
- one/__init__.py,sha256=nInf_OtC7ZRulDTsGaWoCYJCQdioyk1iYSfMN-_fbMU,76
- one/api.py,sha256=IWAjCBhw4DCLmfxcwVkOl0bgRlu2RXIV6Nyfey8HNas,131576
+ one/__init__.py,sha256=Q7LfKkuQmQEcSUXTSIWORj3wSuhlVKr37-BMozKEim4,76
+ one/api.py,sha256=xehJBIDHIF_jpn63tSd4E2_IWbPuo0-q7yL4nYepoXQ,133682
  one/converters.py,sha256=icKlwPyxf3tJtyOFBj_SG06QDLIZLdTGalCSk1-cAvk,30733
  one/params.py,sha256=zwR0Yq09ztROfH3fJsCUc-IDs_PBixT3WU3dm1vX728,15050
  one/registration.py,sha256=cWvQFAzCF04wMZJjdOzBPJkYOJ3BO2KEgqtVG7qOlmA,36177
  one/util.py,sha256=NUG_dTz3_4GXYG49qSql6mFCBkaVaq7_XdecRPRszJQ,20173
- one/webclient.py,sha256=CfHKn6eZu-woed_VEeY8DV6WTzb49u41-lBTRe-EYnM,50132
+ one/webclient.py,sha256=s7O5S9DhGnxj6g2xMfq1NsyvoBXd_Zz9UEw74A9VnBE,50409
  one/alf/__init__.py,sha256=DaFIi7PKlurp5HnyNOpJuIlY3pyhiottFpJfxR59VsY,70
- one/alf/cache.py,sha256=NDN-Ii8E19C6IH1C7EQEIIgPFah3AtM9jhjLeHuBIjM,22735
+ one/alf/cache.py,sha256=6qX_eLEwVdUkoMkxDm0tREi3lIN3AkQEg4UP3SAD2zE,24453
  one/alf/exceptions.py,sha256=6Gw8_ZObLnuYUpY4i0UyU4IA0kBZMBnJBGQyzIcDlUM,3427
  one/alf/io.py,sha256=7mmd1wJwh7qok0WKkpGzkLxEuw4WSnaQ6EkGVvaODLI,30798
  one/alf/path.py,sha256=LpB0kdTPBzuEdAWGe5hFDWCKSq4FgCdiDk8LwVzv-T0,46793
@@ -14,7 +14,7 @@ one/alf/spec.py,sha256=eWW4UDMsyYO52gzadZ-l09X2adVbXwpRKUBtd7NlYMw,20711
  one/remote/__init__.py,sha256=pasT1r9t2F2_-hbx-5EKlJ-yCM9ItucQbeDHUbFJcyA,40
  one/remote/aws.py,sha256=DurvObYa8rgyYQMWeaq1ol7PsdJoE9bVT1NnH2rxRRE,10241
  one/remote/base.py,sha256=cmS5TwCPjlSLuoN2UN995O9N-4Zr8ZHz3in_iRopMgs,4406
- one/remote/globus.py,sha256=3a3r5n0j5KuwNs-khu9hRoNjFx4ODoZfuHgrgToEEgM,47510
+ one/remote/globus.py,sha256=GZTuORKFGKmuO9a71YcaURXIozmjIj_--mD5hqsZmLw,47511
  one/tests/fixtures/datasets.pqt,sha256=oYfOoGJfT1HN_rj-zpAaNpzzJsneekvDqg4zQ6XOQgk,29918
  one/tests/fixtures/sessions.pqt,sha256=KmBvSi_o0fhSnFhOjQOTq1BHemZsbn89BcSVw4Ecp60,6916
  one/tests/fixtures/test_dbs.json,sha256=Dq6IBOjofB6YdsEJEZDSDe6Gh4WebKX4Erx7Hs2i8WQ,310
@@ -30,8 +30,8 @@ one/tests/fixtures/rest_responses/6dc96f7e9bcc6ac2e7581489b9580a6cd3f28293,sha25
  one/tests/fixtures/rest_responses/db1731fb8df0208944ae85f76718430813a8bf50,sha256=Dki7cTDg1bhbtPkuiv9NPRsuhM7TuicxoLcGtRD4D8g,209
  one/tests/fixtures/rest_responses/dcce48259bb929661f60a02a48563f70aa6185b3,sha256=skaKl6sPgTyrufCIGNVNJfccXM-jSjYvAdyqNS3HXuA,416
  one/tests/fixtures/rest_responses/f530d6022f61cdc9e38cc66beb3cb71f3003c9a1,sha256=EOqhNIVcmZ7j7aF09g5StFY434f2xbxwZLHwfeM4tug,22530
- ONE_api-3.0b5.dist-info/LICENSE,sha256=W6iRQJcr-tslNfY4gL98IWvPtpe0E3tcWCFOD3IFUqg,1087
- ONE_api-3.0b5.dist-info/METADATA,sha256=V71vo5_2W9rsjtyhah07thKb0zXKS0VkM53Zw3eYRu8,4148
- ONE_api-3.0b5.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
- ONE_api-3.0b5.dist-info/top_level.txt,sha256=LIsI2lzmA5jh8Zrw5dzMdE3ydLgmq-WF6rpoxSVDSAY,4
- ONE_api-3.0b5.dist-info/RECORD,,
+ one_api-3.1.0.dist-info/licenses/LICENSE,sha256=W6iRQJcr-tslNfY4gL98IWvPtpe0E3tcWCFOD3IFUqg,1087
+ one_api-3.1.0.dist-info/METADATA,sha256=WbdWuBvggO9WaRznnpxA5auRVJ2MAqXpTplYUKN-3GI,4170
+ one_api-3.1.0.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ one_api-3.1.0.dist-info/top_level.txt,sha256=LIsI2lzmA5jh8Zrw5dzMdE3ydLgmq-WF6rpoxSVDSAY,4
+ one_api-3.1.0.dist-info/RECORD,,
ONE_api-3.0b5.dist-info/WHEEL → one_api-3.1.0.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.8.0)
+ Generator: setuptools (80.7.1)
  Root-Is-Purelib: true
  Tag: py3-none-any