eegdash 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.

Potentially problematic release: this version of eegdash might be problematic.

Files changed (75)
  1. eegdash/data_utils.py → data_utils.py +131 -5
  2. {eegdash-0.0.1.dist-info → eegdash-0.0.3.dist-info}/METADATA +75 -8
  3. eegdash-0.0.3.dist-info/RECORD +8 -0
  4. {eegdash-0.0.1.dist-info → eegdash-0.0.3.dist-info}/WHEEL +1 -1
  5. eegdash-0.0.3.dist-info/top_level.txt +3 -0
  6. main.py +199 -0
  7. eegdash/SignalStore/__init__.py +0 -0
  8. eegdash/SignalStore/signalstore/__init__.py +0 -3
  9. eegdash/SignalStore/signalstore/adapters/read_adapters/abstract_read_adapter.py +0 -13
  10. eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/schema_read_adapter.py +0 -16
  11. eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/vocabulary_read_adapter.py +0 -19
  12. eegdash/SignalStore/signalstore/adapters/read_adapters/handmade_records/excel_study_organizer_read_adapter.py +0 -114
  13. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/axona/axona_read_adapter.py +0 -912
  14. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/ReadIntanSpikeFile.py +0 -140
  15. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/intan_read_adapter.py +0 -29
  16. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/__init__.py +0 -0
  17. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/data_to_result.py +0 -62
  18. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/get_bytes_per_data_block.py +0 -36
  19. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/notch_filter.py +0 -50
  20. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/qstring.py +0 -41
  21. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_header.py +0 -135
  22. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_one_data_block.py +0 -45
  23. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/load_intan_rhd_format.py +0 -204
  24. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/__init__.py +0 -0
  25. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/data_to_result.py +0 -60
  26. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/get_bytes_per_data_block.py +0 -37
  27. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/notch_filter.py +0 -50
  28. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/qstring.py +0 -41
  29. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_header.py +0 -153
  30. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_one_data_block.py +0 -47
  31. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/load_intan_rhs_format.py +0 -213
  32. eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/neurodata_without_borders/neurodata_without_borders_read_adapter.py +0 -14
  33. eegdash/SignalStore/signalstore/operations/__init__.py +0 -4
  34. eegdash/SignalStore/signalstore/operations/handler_executor.py +0 -22
  35. eegdash/SignalStore/signalstore/operations/handler_factory.py +0 -41
  36. eegdash/SignalStore/signalstore/operations/handlers/base_handler.py +0 -44
  37. eegdash/SignalStore/signalstore/operations/handlers/domain/property_model_handlers.py +0 -79
  38. eegdash/SignalStore/signalstore/operations/handlers/domain/schema_handlers.py +0 -3
  39. eegdash/SignalStore/signalstore/operations/helpers/abstract_helper.py +0 -17
  40. eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_extractor.py +0 -33
  41. eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_rawio.py +0 -165
  42. eegdash/SignalStore/signalstore/operations/helpers/spikeinterface_helper.py +0 -100
  43. eegdash/SignalStore/signalstore/operations/helpers/wrappers/neo_wrappers.py +0 -21
  44. eegdash/SignalStore/signalstore/operations/helpers/wrappers/nwb_wrappers.py +0 -27
  45. eegdash/SignalStore/signalstore/store/__init__.py +0 -8
  46. eegdash/SignalStore/signalstore/store/data_access_objects.py +0 -1181
  47. eegdash/SignalStore/signalstore/store/datafile_adapters.py +0 -131
  48. eegdash/SignalStore/signalstore/store/repositories.py +0 -928
  49. eegdash/SignalStore/signalstore/store/store_errors.py +0 -68
  50. eegdash/SignalStore/signalstore/store/unit_of_work.py +0 -97
  51. eegdash/SignalStore/signalstore/store/unit_of_work_provider.py +0 -67
  52. eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_recording.py +0 -1
  53. eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_sorter.py +0 -1
  54. eegdash/SignalStore/signalstore/utilities/testing/data_mocks.py +0 -513
  55. eegdash/SignalStore/signalstore/utilities/tools/dataarrays.py +0 -49
  56. eegdash/SignalStore/signalstore/utilities/tools/mongo_records.py +0 -25
  57. eegdash/SignalStore/signalstore/utilities/tools/operation_response.py +0 -78
  58. eegdash/SignalStore/signalstore/utilities/tools/purge_orchestration_response.py +0 -21
  59. eegdash/SignalStore/signalstore/utilities/tools/quantities.py +0 -15
  60. eegdash/SignalStore/signalstore/utilities/tools/strings.py +0 -38
  61. eegdash/SignalStore/signalstore/utilities/tools/time.py +0 -17
  62. eegdash/SignalStore/tests/conftest.py +0 -799
  63. eegdash/SignalStore/tests/data/valid_data/data_arrays/make_fake_data.py +0 -59
  64. eegdash/SignalStore/tests/unit/store/conftest.py +0 -0
  65. eegdash/SignalStore/tests/unit/store/test_data_access_objects.py +0 -1235
  66. eegdash/SignalStore/tests/unit/store/test_repositories.py +0 -1309
  67. eegdash/SignalStore/tests/unit/store/test_unit_of_work.py +0 -7
  68. eegdash/SignalStore/tests/unit/test_ci_cd.py +0 -8
  69. eegdash/aws_ingest.py +0 -29
  70. eegdash/main.py +0 -17
  71. eegdash/signalstore_data_utils.py +0 -280
  72. eegdash-0.0.1.dist-info/RECORD +0 -72
  73. eegdash-0.0.1.dist-info/top_level.txt +0 -1
  74. /eegdash/__init__.py → /__init__.py +0 -0
  75. {eegdash-0.0.1.dist-info → eegdash-0.0.3.dist-info}/LICENSE +0 -0

eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/load_intan_rhs_format.py (deleted file)
@@ -1,213 +0,0 @@
- #! /bin/env python
- #
- # Michael Gibson 17 July 2015
- # Modified Zeke Arneodo Dec 2017
- # Modified Adrian Foy January 2023
-
- import sys, struct, math, os, time
- import numpy as np
-
- from intanutil.read_header import read_header
- from intanutil.get_bytes_per_data_block import get_bytes_per_data_block
- from intanutil.read_one_data_block import read_one_data_block
- from intanutil.notch_filter import notch_filter
- from intanutil.data_to_result import data_to_result
-
-
- def read_data(filename):
-     """Reads Intan Technologies RHD2000 data file generated by evaluation board GUI.
-
-     Data are returned in a dictionary, for future extensibility.
-     """
-     tic = time.time()
-     with open(filename, 'rb') as fid:
-         filesize = os.path.getsize(filename)
-
-         header = read_header(fid)
-         # return header
-
-         print('Found {} amplifier channel{}.'.format(header['num_amplifier_channels'],
-                                                      plural(header['num_amplifier_channels'])))
-         print('Found {} board ADC channel{}.'.format(header['num_board_adc_channels'],
-                                                      plural(header['num_board_adc_channels'])))
-         print('Found {} board DAC channel{}.'.format(header['num_board_dac_channels'],
-                                                      plural(header['num_board_dac_channels'])))
-         print('Found {} board digital input channel{}.'.format(header['num_board_dig_in_channels'],
-                                                                plural(header['num_board_dig_in_channels'])))
-         print('Found {} board digital output channel{}.'.format(header['num_board_dig_out_channels'],
-                                                                 plural(header['num_board_dig_out_channels'])))
-         print('')
-
-         # Determine how many samples the data file contains.
-         bytes_per_block = get_bytes_per_data_block(header)
-         print('{} bytes per data block'.format(bytes_per_block))
-         # How many data blocks remain in this file?
-         data_present = False
-         bytes_remaining = filesize - fid.tell()
-         if bytes_remaining > 0:
-             data_present = True
-
-         if bytes_remaining % bytes_per_block != 0:
-             raise Exception('Something is wrong with file size : should have a whole number of data blocks')
-
-         num_data_blocks = int(bytes_remaining / bytes_per_block)
-
-         num_amplifier_samples = 128 * num_data_blocks
-         num_board_adc_samples = 128 * num_data_blocks
-         num_board_dac_samples = 128 * num_data_blocks
-         num_board_dig_in_samples = 128 * num_data_blocks
-         num_board_dig_out_samples = 128 * num_data_blocks
-
-         record_time = num_amplifier_samples / header['sample_rate']
-
-         if data_present:
-             print('File contains {:0.3f} seconds of data. Amplifiers were sampled at {:0.2f} kS/s.'.format(record_time,
-                                                                                                            header[
-                                                                                                                'sample_rate'] / 1000))
-         else:
-             print('Header file contains no data. Amplifiers were sampled at {:0.2f} kS/s.'.format(
-                 header['sample_rate'] / 1000))
-
-         if data_present:
-             # Pre-allocate memory for data.
-             print('')
-             print('Allocating memory for data...')
-
-             data = {}
-             data['t'] = np.zeros(num_amplifier_samples, dtype=np.int_)
-
-             data['amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint)
-
-             if header['dc_amplifier_data_saved']:
-                 data['dc_amplifier_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.uint) * header['dc_amplifier_data_saved']
-
-             data['stim_data_raw'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.int_)
-             data['stim_data'] = np.zeros([header['num_amplifier_channels'], num_amplifier_samples], dtype=np.int_)
-
-             data['board_adc_data'] = np.zeros([header['num_board_adc_channels'], num_board_adc_samples], dtype=np.uint)
-             data['board_dac_data'] = np.zeros([header['num_board_dac_channels'], num_board_dac_samples], dtype=np.uint)
-
-             # By default, this script interprets digital events (digital inputs, outputs, amp settle, compliance limit, and charge recovery) as booleans.
-             # If unsigned int values are preferred (0 for False, 1 for True), replace the 'dtype=np.bool_' argument with 'dtype=np.uint' as shown.
-             # The commented line below illustrates this for digital input data; the same can be done for the other digital data types.
-
-             #data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.uint)
-             data['board_dig_in_data'] = np.zeros([header['num_board_dig_in_channels'], num_board_dig_in_samples], dtype=np.bool_)
-             data['board_dig_in_raw'] = np.zeros(num_board_dig_in_samples, dtype=np.uint)
-             data['board_dig_out_data'] = np.zeros([header['num_board_dig_out_channels'], num_board_dig_out_samples], dtype=np.bool_)
-             data['board_dig_out_raw'] = np.zeros(num_board_dig_out_samples, dtype=np.uint)
-
-             # Read sampled data from file.
-             print('Reading data from file...')
-
-             # Initialize indices used in looping
-             indices = {}
-             indices['amplifier'] = 0
-             indices['aux_input'] = 0
-             indices['board_adc'] = 0
-             indices['board_dac'] = 0
-             indices['board_dig_in'] = 0
-             indices['board_dig_out'] = 0
-
-             print_increment = 10
-             percent_done = print_increment
-             for i in (range(num_data_blocks)):
-                 read_one_data_block(data, header, indices, fid)
-                 # Increment all indices by 128
-                 indices = {k: v + 128 for k, v in indices.items()}
-
-                 fraction_done = 100 * (1.0 * i / num_data_blocks)
-                 if fraction_done >= percent_done:
-                     print('{}% done...'.format(percent_done))
-                     percent_done = percent_done + print_increment
-
-             # Make sure we have read exactly the right amount of data.
-             bytes_remaining = filesize - fid.tell()
-             if bytes_remaining != 0: raise Exception('Error: End of file not reached.')
-
-     # end of reading data file.
-     # return data
-
-     if (data_present):
-         print('Parsing data...')
-
-         # Extract digital input channels to separate variables.
-         for i in range(header['num_board_dig_in_channels']):
-             data['board_dig_in_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_in_raw'],
-                                                                           (1 << header['board_dig_in_channels'][i][
-                                                                               'native_order'])), 0)
-
-         # Extract digital output channels to separate variables.
-         for i in range(header['num_board_dig_out_channels']):
-             data['board_dig_out_data'][i, :] = np.not_equal(np.bitwise_and(data['board_dig_out_raw'],
-                                                                            (1 << header['board_dig_out_channels'][i][
-                                                                                'native_order'])), 0)
-
-         # Extract stimulation data
-         data['compliance_limit_data'] = np.bitwise_and(data['stim_data_raw'], 32768) >= 1  # get 2^15 bit, interpret as True or False
-         data['charge_recovery_data'] = np.bitwise_and(data['stim_data_raw'], 16384) >= 1  # get 2^14 bit, interpret as True or False
-         data['amp_settle_data'] = np.bitwise_and(data['stim_data_raw'], 8192) >= 1  # get 2^13 bit, interpret as True or False
-         data['stim_polarity'] = 1 - (2 * (np.bitwise_and(data['stim_data_raw'], 256) >> 8))  # get 2^8 bit, interpret as +1 for 0_bit or -1 for 1_bit
-
-         curr_amp = np.bitwise_and(data['stim_data_raw'], 255)  # get least-significant 8 bits corresponding to the current amplitude
-         data['stim_data'] = curr_amp * data['stim_polarity']  # multiply current amplitude by the correct sign
-
-         # Scale voltage levels appropriately.
-         data['amplifier_data'] = np.multiply(0.195,
-                                              (
-                                                  data['amplifier_data'].astype(np.int32) - 32768))  # units = microvolts
-         data['stim_data'] = np.multiply(header['stim_step_size'], data['stim_data'] / 1.0e-6)
-
-         if header['dc_amplifier_data_saved']:
-             data['dc_amplifier_data'] = np.multiply(-0.01923,
-                                                     (data['dc_amplifier_data'].astype(
-                                                         np.int32) - 512))  # units = volts
-
-         data['board_adc_data'] = np.multiply(0.0003125, (data['board_adc_data'].astype(np.int32) - 32768))  # units = volts
-         data['board_dac_data'] = np.multiply(0.0003125, (data['board_dac_data'].astype(np.int32) - 32768))  # units = volts
-
-         # Check for gaps in timestamps.
-         num_gaps = np.sum(np.not_equal(data['t'][1:] - data['t'][:-1], 1))
-         if num_gaps == 0:
-             print('No missing timestamps in data.')
-         else:
-             print('Warning: {0} gaps in timestamp data found. Time scale will not be uniform!'.format(num_gaps))
-
-         # Scale time steps (units = seconds).
-         data['t'] = data['t'] / header['sample_rate']
-
-         # If the software notch filter was selected during the recording, apply the
-         # same notch filter to amplifier data here.
-         if header['notch_filter_frequency'] > 0 and header['version']['major'] < 3:
-             print_increment = 10
-             percent_done = print_increment
-             for i in range(header['num_amplifier_channels']):
-                 data['amplifier_data'][i, :] = notch_filter(data['amplifier_data'][i, :], header['sample_rate'],
-                                                             header['notch_filter_frequency'], 10)
-                 if fraction_done >= percent_done:
-                     print('{}% done...'.format(percent_done))
-                     percent_done = percent_done + print_increment
-     else:
-         data = []
-
-     # Move variables to result struct.
-     result = data_to_result(header, data, data_present)
-
-     print('Done! Elapsed time: {0:0.1f} seconds'.format(time.time() - tic))
-
-     return result
-
-
- def plural(n):
-     """Utility function to optionally pluralize words based on the value of n.
-     """
-
-     if n == 1:
-         return ''
-     else:
-         return 's'
-
-
- if __name__ == '__main__':
-     a = read_data(sys.argv[1])
-     #print(a)
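
The stimulation words parsed above pack several boolean flags and an 8-bit current amplitude into a single 16-bit value. A minimal numpy sketch of that decoding, using two made-up raw words (the bit layout follows the comments in the code above):

    import numpy as np

    # Bit 15 = compliance limit, bit 8 = polarity, bits 0-7 = current amplitude.
    stim_raw = np.array([0b1000000100000011, 0b0000000000000101])

    compliance = np.bitwise_and(stim_raw, 32768) >= 1          # [ True, False]
    polarity = 1 - 2 * (np.bitwise_and(stim_raw, 256) >> 8)    # [-1, 1]
    amplitude = np.bitwise_and(stim_raw, 255)                  # [3, 5]
    print(amplitude * polarity)                                # [-3  5]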

eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/neurodata_without_borders/neurodata_without_borders_read_adapter.py (deleted file)
@@ -1,14 +0,0 @@
- from signalstore.operations.importers.adapters.abstract_read_adapter import AbstractReadAdapter
-
- #import pynwb
-
- class NeurodataWithoutBordersReadAdapter(AbstractReadAdapter):
-     def __init__(self, path):
-         self.path = path
-
-     def read(self):
-         """Reads a NWB file and converts each data object into an xarray.DataArray with
-         the appropriate dimensions, coordinates and metadata attributes for the
-         Neuroscikit data model.
-         """
-         pass
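
The read() stub above was left unimplemented. A hedged sketch of what it might do with pynwb and xarray (read_nwb_acquisitions is a hypothetical helper; the Neuroscikit-specific dimensions, coordinates, and attributes are unknown, so only a generic conversion is shown):

    import xarray as xr
    from pynwb import NWBHDF5IO

    def read_nwb_acquisitions(path):
        """Load each acquisition TimeSeries in an NWB file as an xarray.DataArray."""
        arrays = {}
        with NWBHDF5IO(path, mode="r") as io:
            nwbfile = io.read()
            for name, ts in nwbfile.acquisition.items():
                # ts.data[:] materializes the HDF5 dataset before the file closes.
                arrays[name] = xr.DataArray(ts.data[:], attrs={"unit": ts.unit})
        return arrays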

eegdash/SignalStore/signalstore/operations/__init__.py (deleted file)
@@ -1,4 +0,0 @@
- from eegdash.SignalStore.signalstore.operations.handler_executor import HandlerExecutor
- from eegdash.SignalStore.signalstore.operations.handler_factory import HandlerFactory
-
- __all__ = ["HandlerExecutor", "HandlerFactory"]

eegdash/SignalStore/signalstore/operations/handler_executor.py (deleted file)
@@ -1,22 +0,0 @@
- class BaseHandlerExecutor:
-     def __init__(self, handler_factory):
-         self.handler_factory = handler_factory
-     def list_handlers(self):
-         return self.handler_factory.list_handlers()
-
-     def help(self, handler_name):
-         return self.handler_factory.help(handler_name)
-
-     def help_all(self):
-         return self.handler_factory.help_all()
-
-     #def validate_request(self, request):
-     #    """Validate a request."""
-     #    pass
-
- class HandlerExecutor(BaseHandlerExecutor):
-     def __init__(self, handler_factory):
-         super().__init__(handler_factory)
-
-     def do(self, handler_name, **kwargs):
-         return self.handler_factory.create(handler_name).execute(**kwargs)

eegdash/SignalStore/signalstore/operations/handler_factory.py (deleted file)
@@ -1,41 +0,0 @@
- import os
- import importlib.util
-
- from eegdash.SignalStore.signalstore.operations.handlers.base_handler import BaseHandler
-
- class HandlerFactory:
-     def __init__(self, uow_provider, base_path='signalstore.operations.handlers', base_dir='src/operations/handlers'):
-         self._uow_provider = uow_provider
-         self.base_path = base_path
-         self.base_dir = base_dir
-         self._handlers = self._discover_handlers()
-
-     def _discover_handlers(self):
-         """Recursively search for handler classes and return a dict of {handler_name: handler_class}."""
-         handlers = {}
-         for root, dirs, files in os.walk(self.base_dir):
-             for file in files:
-                 if file.endswith('_handler.py') or file.endswith('_handlers.py'):
-                     module_path = os.path.join(root, file).replace("/", ".")[:-3]  # Convert path to module notation and remove .py
-                     for name, obj in vars(importlib.import_module(module_path)).items():
-                         if isinstance(obj, type) and issubclass(obj, BaseHandler) and obj != BaseHandler:
-                             handlers[name] = obj
-         return handlers
-
-     def create(self, handler_name):
-         handler_class = self._handlers.get(handler_name)
-         if not handler_class:
-             raise Exception(f"Handler {handler_name} not found.")
-         return handler_class(self._uow_provider)
-
-     def list_handlers(self):
-         """List available handlers."""
-         return list(self._handlers.keys())
-
-     def get_handler_help(self, handler_name):
-         """Get documentation for a specific handler."""
-         handler_class = self._handlers.get(handler_name)
-         if handler_class:
-             return handler_class.__doc__ or f"No documentation available for {handler_name}"
-         else:
-             raise Exception(f"Handler {handler_name} not found.")
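
Wiring these two classes together would look roughly like the following sketch (the provider callable and handler name are illustrative; note that create() passes only the unit-of-work provider to the handler, while BaseHandler in the next file also expects a project_name, so this shows intent rather than working code):

    # make_uow is an assumed callable returning a unit-of-work context manager.
    factory = HandlerFactory(make_uow, base_dir="signalstore/operations/handlers")
    executor = HandlerExecutor(factory)

    print(executor.list_handlers())   # names of every discovered *Handler class
    executor.do("AddPropertyModelDictHandler",
                term={"schema_name": "example", "schema_type": "property_model"})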

eegdash/SignalStore/signalstore/operations/handlers/base_handler.py (deleted file)
@@ -1,44 +0,0 @@
- from abc import ABC, abstractmethod
- from pydantic import BaseModel
-
- class BaseHandler(ABC):
-     def __init__(self, unit_of_work_provider, project_name):
-         self._uow_provider = unit_of_work_provider
-         self._project_name = project_name
-
-     @property
-     def unit_of_work(self):
-         return self._uow_provider(self._project_name)
-
-     @abstractmethod
-     def execute(self):
-         # use with self.unit_of_work() as uow:
-         pass
-
- class BaseHandlerResponse(BaseModel):
-     status: str
-     result: dict
-     effects: dict
-     error: str
-
- class SmallHandlerResponse(BaseHandlerResponse):
-     __slots__ = ["status", "result", "effects", "error"]
-     def __init__(self, status, result, effects, error):
-         self.status = status
-         self.result = result  # Note that this is a dictionary and the structure of the result must be included in the docstrings of each handler
-         self.effects = effects
-         self.error = error
-
-     def __repr__(self):
-         return f"HandlerResponse(status={self.status}, result={self.result}, effects={self.effects}, error={self.error})"
-
-     def __str__(self):
-         return f"HandlerResponse(status={self.status} result={self.result} effects={self.effects} error={self.error})"
-
- class SmallHandlerSuccessResponse(SmallHandlerResponse):
-     def __init__(self, result, effects):
-         super().__init__("SUCCESS", result, effects, None)
-
- class SmallHandlerFailureResponse(SmallHandlerResponse):
-     def __init__(self, error):
-         super().__init__("FAILURE", None, None, error)
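
As written, SmallHandlerResponse fights pydantic: it combines declared fields with __slots__ and a hand-rolled __init__, and the non-optional dict/str annotations reject the None values the success and failure subclasses pass in. A minimal sketch of the same response shape in plain pydantic (the Optional defaults are an assumption):

    from typing import Optional
    from pydantic import BaseModel

    class HandlerResponse(BaseModel):
        status: str
        result: Optional[dict] = None
        effects: Optional[dict] = None
        error: Optional[str] = None

    # pydantic generates __init__, __repr__, and validation automatically:
    ok = HandlerResponse(status="SUCCESS", result={}, effects={})
    failed = HandlerResponse(status="FAILURE", error="something broke")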

eegdash/SignalStore/signalstore/operations/handlers/domain/property_model_handlers.py (deleted file)
@@ -1,79 +0,0 @@
- from signalstore.operations.handlers.base_handler import BaseHandler, HandlerSuccessResponse, HandlerFailureResponse
-
- import json
-
- class AddPropertyModelFromJSONHandler(BaseHandler):
-     """Add a term to the database."""
-
-     def execute(self, term_path):
-         with self.uow_provider() as uow:
-             try:
-                 with open(term_path) as f:
-                     term = json.load(f)
-                 uow.domain_models.add(term)
-                 effects = uow.commit()
-                 return HandlerSuccessResponse(None, effects)
-             except Exception as e:
-                 return HandlerFailureResponse(e)
-
- class AddPropertyModelDictHandler(BaseHandler):
-     """Add a term to the database."""
-
-     def execute(self, term):
-         with self.uow_provider() as uow:
-             try:
-                 uow.domain_models.add(term)
-                 effects = uow.commit()
-                 return HandlerSuccessResponse(None, effects)
-             except Exception as e:
-                 return HandlerFailureResponse(e)
-
- class DeletePropertyModelHandler(BaseHandler):
-     """Delete a term from the database."""
-
-     def execute(self, name):
-         with self.uow_provider() as uow:
-             try:
-                 uow.domain_models.delete(name=name)
-                 effects = uow.commit()
-                 return HandlerSuccessResponse(None, effects)
-             except Exception as e:
-                 return HandlerFailureResponse(e)
-
-
- class GetPropertyModelHandler(BaseHandler):
-     """Get a term from the database."""
-
-     def execute(self, name):
-         with self.uow_provider() as uow:
-             try:
-                 term = uow.domain_models.get(schema_name=name)
-                 # check that term is a property model
-                 if term["schema_type"] != "property_model":
-                     term = None
-                 return HandlerSuccessResponse(term, None)
-             except Exception as e:
-                 return HandlerFailureResponse(e)
-
- class ListPropertyModelsHandler(BaseHandler):
-     """List all domain_models in the database."""
-
-     def execute(self):
-         with self.uow_provider() as uow:
-             try:
-                 terms = uow.domain_models.query({"schema_type": "property_model"})
-                 return HandlerSuccessResponse(terms, None)
-             except Exception as e:
-                 return HandlerFailureResponse(e)
-
- class ListPropertyModelsHandler(BaseHandler):
-     """List all term names in the database."""
-
-     def execute(self):
-         with self.uow_provider() as uow:
-             try:
-                 terms = uow.domain_models.query({"schema_type": "property_model"})
-                 names = [term["name"] for term in terms]
-                 return HandlerSuccessResponse(names, None)
-             except Exception as e:
-                 return HandlerFailureResponse(e)

eegdash/SignalStore/signalstore/operations/handlers/domain/schema_handlers.py (deleted file)
@@ -1,3 +0,0 @@
- from signalstore.operations.handlers.base_handler import BaseHandler, HandlerException
- import json
-

eegdash/SignalStore/signalstore/operations/helpers/abstract_helper.py (deleted file)
@@ -1,17 +0,0 @@
- from abc import ABC, abstractmethod
- from datetime import datetime, timezone
-
- class AbstractFunctionalHelper:
-     pass
-
- class AbstractMutableHelper:
-     def __init__(self, attrs, state):
-         if not isinstance(attrs, dict):
-             raise TypeError("attrs must be a dictionary")
-         if isinstance(state, type(None)):
-             raise TypeError("state cannot be None")
-         self.attrs = attrs
-         self.state = state
-
-     def __copy__(self):
-         return self.__class__(**self.__dict__)

eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_extractor.py (deleted file)
@@ -1,33 +0,0 @@
- from spikeinterface.extractors.neoextractors.neobaseextractor import NeoBaseRecordingExtractor, NeoBaseSortingExtractor
- from neo import rawio
- from signalstore.operations.helpers.neuroscikit_rawio import NeuroSciKitRawIO
- from signalstorestore.data_access_objects import DataArrayDAO, RecordDAO
-
-
- # Because of how the NeoBaseRecordingExtractor works, our rawio class needs to be a member of the
- # neo.rawio module.
- rawio.NeuroSciKitRawIO = NeuroSciKitRawIO
-
-
- class NeuroSciKitExtractor(NeoBaseRecordingExtractor):
-     mode = 'folder'  # Not sure what, if anything, this does.
-     NeoRawIOClass = 'NeuroSciKitRawIO'
-     name = "neuroscikit"
-
-     def __init__(self, data_dao: DataArrayDAO, record_dao: RecordDAO, dataarray_hrid: str, stream_id=None, stream_name=None, all_annotations=False):
-         neo_kwargs = self.map_to_neo_kwargs(data_dao, record_dao, dataarray_hrid)
-         NeoBaseRecordingExtractor.__init__(self, stream_id=stream_id,
-                                            stream_name=stream_name,
-                                            all_annotations=all_annotations,
-                                            **neo_kwargs)
-         self._kwargs.update(dict(data_dao=data_dao, record_dao=record_dao, dataarray_hrid=dataarray_hrid))
-
-     @classmethod
-     def map_to_neo_kwargs(cls, data_dao, record_dao, dataarray_hrid):
-         neo_kwargs = {
-             'data_dao': data_dao,
-             'record_dao': record_dao,
-             'dataarray_hrid': dataarray_hrid,
-         }
-         return neo_kwargs
-

eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_rawio.py (deleted file)
@@ -1,165 +0,0 @@
- from neo.rawio.baserawio import (BaseRawIO, _signal_channel_dtype, _signal_stream_dtype,
-                                  _spike_channel_dtype, _event_channel_dtype)
-
- import numpy as np
- from copy import deepcopy
-
- import pickle
- import neo
- import re
-
-
- class NeuroSciKitRawIO(BaseRawIO):
-     """
-     Class for "reading" fake data from the neuroscikit repos, meant to provide data equivalent to
-     that provided by MEArecRawIO for h5 files.
-     """
-     extensions = ['h5']
-     rawmode = 'one-file'
-
-     def __init__(self, unit_of_work, dataarray_hrid: str):
-         BaseRawIO.__init__(self)
-         self._uow = unit_of_work
-         # check that the uow is open (i.e. has been called using with UnitOfWork(project) as uow:)
-
-         self._dataarray_hrid = dataarray_hrid
-
-     def _source_name(self):
-         return self._dataarray_hrid
-
-     def _parse_header(self):
-         self._sampling_rate = self._data_dao.get('spikeinterface_sampling_rate', self._dataarray_hrid).data
-         self._recordings = self._data_dao.get('spikeinterface_recordings', self._dataarray_hrid).data
-
-         self._num_frames, self._num_channels = self._recordings.shape
-
-         signal_streams = np.array([('Signals', '0')], dtype=_signal_stream_dtype)
-
-         sig_channels = []
-         for c in range(self._num_channels):
-             ch_name = 'ch{}'.format(c)
-             chan_id = str(c + 1)
-             sr = self._sampling_rate  # Hz
-             dtype = self._recordings.dtype
-             units = 'uV'
-             gain = 1.
-             offset = 0.
-             stream_id = '0'
-             sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, stream_id))
-         sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
-
-         # creating units channels
-         spike_channels = []
-
-         hrid_pat = re.compile(fr'{self._dataarray_hrid}-[0-9]+')
-         spiketrain_records = self._record_dao.find({'schema_name': 'spikeinterface_spiketrain', 'hrid': {'$regex': hrid_pat}, 'time_of_removal': None})
-         self._spiketrains = [
-             neo.SpikeTrain(
-                 pickle.loads(spiketrain_record["times"]),
-                 pickle.loads(spiketrain_record["t_stop"]),
-                 units=pickle.loads(spiketrain_record["units"]),
-                 dtype=pickle.loads(spiketrain_record["dtype"]),
-                 copy=spiketrain_record["copy"],
-                 sampling_rate=pickle.loads(spiketrain_record["sampling_rate"]),
-                 t_start=pickle.loads(spiketrain_record["t_start"]),
-                 waveforms=pickle.loads(spiketrain_record["waveforms"]),
-                 left_sweep=spiketrain_record["left_sweep"],
-                 name=spiketrain_record["name"],
-                 file_origin=spiketrain_record["file_origin"],
-                 description=spiketrain_record["description"],
-                 array_annotations=spiketrain_record["array_annotations"],
-                 **pickle.loads(spiketrain_record["annotations_pickle"]),
-             )
-             for spiketrain_record in spiketrain_records]
-
-         for c in range(len(self._spiketrains)):
-             unit_name = 'unit{}'.format(c)
-             unit_id = '#{}'.format(c)
-             # if spiketrains[c].waveforms is not None:
-             wf_units = ''
-             wf_gain = 1.
-             wf_offset = 0.
-             wf_left_sweep = 0
-             wf_sampling_rate = self._sampling_rate
-             spike_channels.append((unit_name, unit_id, wf_units, wf_gain,
-                                    wf_offset, wf_left_sweep, wf_sampling_rate))
-         spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype)
-
-         event_channels = []
-         event_channels = np.array(event_channels, dtype=_event_channel_dtype)
-
-         self.header = {}
-         self.header['nb_block'] = 1
-         self.header['nb_segment'] = [1]
-         self.header['signal_streams'] = signal_streams
-         self.header['signal_channels'] = sig_channels
-         self.header['spike_channels'] = spike_channels
-         self.header['event_channels'] = event_channels
-
-         info_record = self._record_dao.get('spikeinterface_recording_info', self._dataarray_hrid)
-         self._info = pickle.loads(info_record['info_pickle'])
-
-         self._generate_minimal_annotations()
-         for block_index in range(1):
-             bl_ann = self.raw_annotations['blocks'][block_index]
-             bl_ann['mearec_info'] = deepcopy(self._info)
-
-     def _segment_t_start(self, block_index, seg_index):
-         all_starts = [[0.]]
-         return all_starts[block_index][seg_index]
-
-     def _segment_t_stop(self, block_index, seg_index):
-         t_stop = self._num_frames / self._sampling_rate
-         all_stops = [[t_stop]]
-         return all_stops[block_index][seg_index]
-
-     def _get_signal_size(self, block_index, seg_index, stream_index):
-         assert stream_index == 0
-         return self._num_frames
-
-     def _get_signal_t_start(self, block_index, seg_index, stream_index):
-         assert stream_index == 0
-         return self._segment_t_start(block_index, seg_index)
-
-     def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
-                                 stream_index, channel_indexes):
-         if i_start is None:
-             i_start = 0
-         if i_stop is None:
-             i_stop = self._num_frames
-
-         if channel_indexes is None:
-             channel_indexes = slice(self._num_channels)
-         if isinstance(channel_indexes, slice):
-             raw_signals = self._recordings[i_start:i_stop, channel_indexes]
-         else:
-             # sort channels because h5py needs sorted indexes
-             if np.any(np.diff(channel_indexes) < 0):
-                 sorted_channel_indexes = np.sort(channel_indexes)
-                 sorted_idx = np.array([list(sorted_channel_indexes).index(ch)
-                                        for ch in channel_indexes])
-                 raw_signals = self._recordings[i_start:i_stop, sorted_channel_indexes]
-                 raw_signals = raw_signals[:, sorted_idx]
-             else:
-                 raw_signals = self._recordings[i_start:i_stop, channel_indexes]
-         return raw_signals
-
-     def _spike_count(self, block_index, seg_index, unit_index):
-         return len(self._spiketrains[unit_index])
-
-     def _get_spike_timestamps(self, block_index, seg_index, unit_index, t_start, t_stop):
-         spike_timestamps = self._spiketrains[unit_index].times.magnitude
-         if t_start is None:
-             t_start = self._segment_t_start(block_index, seg_index)
-         if t_stop is None:
-             t_stop = self._segment_t_stop(block_index, seg_index)
-         timestamp_idxs = np.where((spike_timestamps >= t_start) & (spike_timestamps < t_stop))
-
-         return spike_timestamps[timestamp_idxs]
-
-     def _rescale_spike_timestamp(self, spike_timestamps, dtype):
-         return spike_timestamps.astype(dtype)
-
-     def _get_spike_raw_waveforms(self, block_index, seg_index,
-                                  spike_channel_index, t_start, t_stop):
-         return None
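
The channel-sorting branch in _get_analogsignal_chunk above exists because h5py requires fancy indexes in increasing order; the sort is then inverted so channels come back in the requested order. A small numpy illustration with a toy in-memory array standing in for the HDF5 dataset:

    import numpy as np

    recordings = np.arange(24).reshape(6, 4)    # 6 frames x 4 channels (toy data)
    channel_indexes = np.array([2, 0, 3])       # requested order, not sorted

    sorted_channels = np.sort(channel_indexes)  # [0, 2, 3]: acceptable to h5py
    unsort = np.array([list(sorted_channels).index(ch) for ch in channel_indexes])
    chunk = recordings[1:4, sorted_channels][:, unsort]

    # Same result as direct fancy indexing on an in-memory array:
    assert np.array_equal(chunk, recordings[1:4][:, channel_indexes])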