eegdash 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of eegdash might be problematic.
- eegdash/SignalStore/__init__.py +0 -0
- eegdash/SignalStore/signalstore/__init__.py +3 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/abstract_read_adapter.py +13 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/schema_read_adapter.py +16 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/domain_modeling/vocabulary_read_adapter.py +19 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/handmade_records/excel_study_organizer_read_adapter.py +114 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/axona/axona_read_adapter.py +912 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/ReadIntanSpikeFile.py +140 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/intan_read_adapter.py +29 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/__init__.py +0 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/data_to_result.py +62 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/get_bytes_per_data_block.py +36 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/notch_filter.py +50 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/qstring.py +41 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_header.py +135 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/intanutil/read_one_data_block.py +45 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhd_format/load_intan_rhd_format.py +204 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/__init__.py +0 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/data_to_result.py +60 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/get_bytes_per_data_block.py +37 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/notch_filter.py +50 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/qstring.py +41 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_header.py +153 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/intanutil/read_one_data_block.py +47 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/intan/load_intan_rhs_format/load_intan_rhs_format.py +213 -0
- eegdash/SignalStore/signalstore/adapters/read_adapters/recording_acquisitions/neurodata_without_borders/neurodata_without_borders_read_adapter.py +14 -0
- eegdash/SignalStore/signalstore/operations/__init__.py +4 -0
- eegdash/SignalStore/signalstore/operations/handler_executor.py +22 -0
- eegdash/SignalStore/signalstore/operations/handler_factory.py +41 -0
- eegdash/SignalStore/signalstore/operations/handlers/base_handler.py +44 -0
- eegdash/SignalStore/signalstore/operations/handlers/domain/property_model_handlers.py +79 -0
- eegdash/SignalStore/signalstore/operations/handlers/domain/schema_handlers.py +3 -0
- eegdash/SignalStore/signalstore/operations/helpers/abstract_helper.py +17 -0
- eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_extractor.py +33 -0
- eegdash/SignalStore/signalstore/operations/helpers/neuroscikit_rawio.py +165 -0
- eegdash/SignalStore/signalstore/operations/helpers/spikeinterface_helper.py +100 -0
- eegdash/SignalStore/signalstore/operations/helpers/wrappers/neo_wrappers.py +21 -0
- eegdash/SignalStore/signalstore/operations/helpers/wrappers/nwb_wrappers.py +27 -0
- eegdash/SignalStore/signalstore/store/__init__.py +8 -0
- eegdash/SignalStore/signalstore/store/data_access_objects.py +1181 -0
- eegdash/SignalStore/signalstore/store/datafile_adapters.py +131 -0
- eegdash/SignalStore/signalstore/store/repositories.py +928 -0
- eegdash/SignalStore/signalstore/store/store_errors.py +68 -0
- eegdash/SignalStore/signalstore/store/unit_of_work.py +97 -0
- eegdash/SignalStore/signalstore/store/unit_of_work_provider.py +67 -0
- eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_recording.py +1 -0
- eegdash/SignalStore/signalstore/utilities/data_adapters/spike_interface_adapters/si_sorter.py +1 -0
- eegdash/SignalStore/signalstore/utilities/testing/data_mocks.py +513 -0
- eegdash/SignalStore/signalstore/utilities/tools/dataarrays.py +49 -0
- eegdash/SignalStore/signalstore/utilities/tools/mongo_records.py +25 -0
- eegdash/SignalStore/signalstore/utilities/tools/operation_response.py +78 -0
- eegdash/SignalStore/signalstore/utilities/tools/purge_orchestration_response.py +21 -0
- eegdash/SignalStore/signalstore/utilities/tools/quantities.py +15 -0
- eegdash/SignalStore/signalstore/utilities/tools/strings.py +38 -0
- eegdash/SignalStore/signalstore/utilities/tools/time.py +17 -0
- eegdash/SignalStore/tests/conftest.py +799 -0
- eegdash/SignalStore/tests/data/valid_data/data_arrays/make_fake_data.py +59 -0
- eegdash/SignalStore/tests/unit/store/conftest.py +0 -0
- eegdash/SignalStore/tests/unit/store/test_data_access_objects.py +1235 -0
- eegdash/SignalStore/tests/unit/store/test_repositories.py +1309 -0
- eegdash/SignalStore/tests/unit/store/test_unit_of_work.py +7 -0
- eegdash/SignalStore/tests/unit/test_ci_cd.py +8 -0
- eegdash/__init__.py +1 -0
- eegdash/aws_ingest.py +29 -0
- eegdash/data_utils.py +213 -0
- eegdash/main.py +17 -0
- eegdash/signalstore_data_utils.py +280 -0
- eegdash-0.0.1.dist-info/LICENSE +20 -0
- eegdash-0.0.1.dist-info/METADATA +72 -0
- eegdash-0.0.1.dist-info/RECORD +72 -0
- eegdash-0.0.1.dist-info/WHEEL +5 -0
- eegdash-0.0.1.dist-info/top_level.txt +1 -0
eegdash/SignalStore/tests/unit/store/test_repositories.py
@@ -0,0 +1,1309 @@
import pytest
from datetime import datetime, timedelta
from signalstore.store.repositories import *

class TestDomainModelRepository:
    # get tests (test all expected behaviors of get())
    # ------------------------------------------------
    # Category 1: get a domain model that exists
    # Test 1.1: get a metamodel that exists
    # Test 1.2: get a property model that exists
    # Test 1.3: get a data model that exists
    # Category 2: get a domain model that does not exist
    # Test 2.1: get a model that does not exist (error)
    # Category 3: get a domain model that exists but is invalid
    # Test 3.1: iterate over all invalid models from conftest and check for ValidationError
    # Category 4: bad arguments
    # Test 4.1: get a domain model with a bad schema_name argument (error)

    @pytest.mark.parametrize("schema_name", ['record_metamodel', 'xarray_dataarray_metamodel'])
    def test_get_metamodel_that_exists(self, populated_domain_repo, schema_name):
        metamodel = populated_domain_repo.get(schema_name)
        assert metamodel is not None
        assert metamodel.get('schema_name') == schema_name
        assert metamodel.get('schema_type') == 'metamodel'

    @pytest.mark.parametrize("schema_name", ['unit_of_measure', 'dimension_of_measure', 'time_of_save', 'time_of_removal'])
    def test_get_property_model_that_exists(self, populated_domain_repo, schema_name):
        property_model = populated_domain_repo.get(schema_name)
        assert property_model is not None
        assert property_model.get('schema_name') == schema_name
        assert property_model.get('schema_type') == 'property_model'

    @pytest.mark.parametrize("schema_name", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_get_data_model_that_exists(self, populated_domain_repo, schema_name):
        data_model = populated_domain_repo.get(schema_name)
        assert data_model is not None
        assert data_model.get('schema_name') == schema_name
        assert data_model.get('schema_type') == 'data_model'

    def test_get_metamodel_that_does_not_exist(self, populated_domain_repo):
        assert populated_domain_repo.get('does_not_exist') is None

    @pytest.mark.parametrize("invalid_name_idx", range(17))
    def test_get_invalid_model_that_exists(self, populated_domain_repo, invalid_model_schema_names, invalid_name_idx):
        schema_name = invalid_model_schema_names[invalid_name_idx]
        with pytest.raises(ValidationError):
            populated_domain_repo.get(schema_name)
            assert False, f"Should have raised a ValidationError for schema_name: {schema_name}"

    @pytest.mark.parametrize("bad_schema_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c"), {"hash": "map"}])
    def test_get_model_with_bad_schema_name(self, populated_domain_repo, bad_schema_name):
        with pytest.raises(DomainRepositoryTypeError):
            populated_domain_repo.get(bad_schema_name)
            assert False, f"Should have raised a TypeError for schema_name: {bad_schema_name}"

    # find tests (test all expected behaviors of find())
    # --------------------------------------------------
    # Category 1: find models that exist
    # Test 1.1: find all valid models
    # Test 1.2: find all valid metamodels
    # Test 1.3: find all property models
    # Test 1.4: find all data models
    # Test 1.5: find all data models with a specific metamodel_ref
    # Category 2: find with query that does not match any models
    # Test 2.1: find model with schema_name that does not exist; should return empty list
    # Test 2.2: find model with schema_type that does not exist; should return empty list
    # Test 2.3: find model with metamodel_ref that does not exist; should return empty list
    # Category 3: find with query that matches models but is invalid
    # Test 3.1: iterate over all invalid schemas from conftest and check for ValidationError
    # Category 4: bad arguments
    # Test 4.1: find with bad filter argument (error)
    # Test 4.2: find with bad projection argument (error)
    # Test 4.3: find with bad sort argument (error)

    def test_find_all_models(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({})
        n = len(models)
        all = populated_valid_only_domain_repo._dao._collection.find({})
        all = len(list(all))
        assert n == all

    def test_find_all_metamodels(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'schema_type': 'metamodel'})
        n = len(models)
        all = populated_valid_only_domain_repo._dao._collection.find({'schema_type': 'metamodel'})
        all = len(list(all))
        assert n == all

    def test_find_all_property_models(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'schema_type': 'property_model'})
        n = len(models)
        all = populated_valid_only_domain_repo._dao._collection.find({'schema_type': 'property_model'})
        all = len(list(all))
        assert n == all

    def test_find_all_data_models(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'schema_type': 'data_model'})
        n = len(models)
        all = populated_valid_only_domain_repo._dao._collection.find({'schema_type': 'data_model'})
        all = len(list(all))
        assert n == all

    @pytest.mark.parametrize("metamodel_ref", ['record_metamodel', 'xarray_dataarray_metamodel'])
    def test_find_data_models_with_specific_metamodel_ref(self, populated_valid_only_domain_repo, metamodel_ref):
        models = populated_valid_only_domain_repo.find({'schema_type': 'data_model', 'metamodel_ref': metamodel_ref})
        assert len(models) > 0
        for model in models:
            assert model.get('metamodel_ref') == metamodel_ref

    def test_find_model_with_schema_name_that_does_not_exist(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'schema_name': 'does_not_exist'})
        assert len(models) == 0

    def test_find_model_with_schema_type_that_does_not_exist(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'schema_type': 'does_not_exist'})
        assert len(models) == 0

    def test_find_model_with_metamodel_ref_that_does_not_exist(self, populated_valid_only_domain_repo):
        models = populated_valid_only_domain_repo.find({'metamodel_ref': 'does_not_exist'})
        assert len(models) == 0

    @pytest.mark.parametrize("invalid_name_idx", range(17))
    def test_find_invalid_model_that_exists(self, populated_domain_repo, invalid_model_schema_names, invalid_name_idx):
        schema_name = invalid_model_schema_names[invalid_name_idx]
        with pytest.raises(ValidationError):
            populated_domain_repo.find({'schema_name': schema_name})
            assert False, f"Should have raised a ValidationError for schema_name: {schema_name}"

    @pytest.mark.parametrize("bad_filter", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_find_with_bad_filter(self, populated_valid_only_domain_repo, bad_filter):
        with pytest.raises(DomainRepositoryTypeError):
            populated_valid_only_domain_repo.find(bad_filter)
            assert False, f"Should have raised a TypeError for filter: {bad_filter}"

    @pytest.mark.parametrize("bad_projection", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_find_with_bad_projection(self, populated_valid_only_domain_repo, bad_projection):
        with pytest.raises(DomainRepositoryTypeError):
            populated_valid_only_domain_repo.find({}, bad_projection)
            assert False, f"Should have raised a TypeError for projection: {bad_projection}"

    # exists tests (test all expected behaviors of exists())
    # ------------------------------------------------------
    # Category 1: exists with model that exists
    # Test 1.1: exists with model that exists returns true
    # Category 2: exists with model that does not exist
    # Test 2.1: exists with model that does not exist returns false
    # Category 3: exists with model that exists but is invalid
    # Test 3.1: iterate over all invalid schemas from conftest; should return True for all
    # Category 4: bad arguments
    # Test 4.1: exists with bad schema_name argument (error)

    @pytest.mark.parametrize("schema_name", ['record_metamodel', 'xarray_dataarray_metamodel'])
    def test_exists_with_model_that_exists(self, populated_domain_repo, schema_name):
        assert populated_domain_repo.exists(schema_name)

    def test_exists_with_model_that_does_not_exist(self, populated_domain_repo):
        assert not populated_domain_repo.exists('does_not_exist')

    @pytest.mark.parametrize("invalid_name_idx", range(17))
    def test_exists_with_invalid_model_that_exists(self, populated_domain_repo, invalid_model_schema_names, invalid_name_idx):
        schema_name = invalid_model_schema_names[invalid_name_idx]
        assert populated_domain_repo.exists(schema_name), f"Should have returned true for schema_name: {schema_name}"

    @pytest.mark.parametrize("bad_schema_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c"), {"hash": "map"}])
    def test_exists_with_bad_schema_name(self, populated_domain_repo, bad_schema_name):
        with pytest.raises(DomainRepositoryTypeError):
            populated_domain_repo.exists(bad_schema_name)
            assert False, f"Should have raised a TypeError for schema_name: {bad_schema_name}"

    # add tests (test all expected behaviors of add())
    # ------------------------------------------------
    # Category 1: add a model that does not exist
    # Test 1.1: add a metamodel that does not exist
    # Test 1.2: add a property model that does not exist
    # Test 1.3: add a data model that does not exist
    # Category 2: add a model that exists
    # Test 2.1: add a metamodel that exists (error)
    # Test 2.2: add a property model that exists (error)
    # Test 2.3: add a data model that exists (error)
    # Category 3: add a model that does not exist but is invalid
    # Test 3.1: iterate over all invalid schemas from conftest and check for ValidationError
    # Category 4: bad arguments
    # Test 4.1: add a model with a bad model argument (error)

    def test_add_metamodel_that_does_not_exist(self, populated_domain_repo, metamodels):
        new_metamodel = metamodels[0].copy()
        new_name = 'new_metamodel'
        new_metamodel['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        populated_domain_repo.add(new_metamodel)
        assert populated_domain_repo.exists(new_name)

    def test_add_property_model_that_does_not_exist(self, populated_domain_repo, property_models):
        new_property_model = property_models[0].copy()
        new_name = 'new_property_model'
        new_property_model['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        populated_domain_repo.add(new_property_model)
        assert populated_domain_repo.exists(new_name)

    def test_add_data_model_that_does_not_exist(self, populated_domain_repo, data_models):
        new_data_model = data_models[0].copy()
        new_name = 'new_data_model'
        new_data_model['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        populated_domain_repo.add(new_data_model)
        assert populated_domain_repo.exists(new_name)

    def test_add_metamodel_that_exists(self, populated_domain_repo, metamodels):
        existing_metamodel = metamodels[0].copy()
        assert populated_domain_repo.exists(existing_metamodel['schema_name'])
        with pytest.raises(DomainRepositoryModelAlreadyExistsError):
            populated_domain_repo.add(existing_metamodel)
            assert False, f"Should have raised a DomainRepositoryExistsError for schema_name: {existing_metamodel['schema_name']}"

    def test_add_property_model_that_exists(self, populated_domain_repo, property_models):
        existing_property_model = property_models[0].copy()
        assert populated_domain_repo.exists(existing_property_model['schema_name'])
        with pytest.raises(DomainRepositoryModelAlreadyExistsError):
            populated_domain_repo.add(existing_property_model)
            assert False, f"Should have raised a DomainRepositoryExistsError for schema_name: {existing_property_model['schema_name']}"

    def test_add_data_model_that_exists(self, populated_domain_repo, data_models):
        existing_data_model = data_models[0].copy()
        assert populated_domain_repo.exists(existing_data_model['schema_name'])
        with pytest.raises(DomainRepositoryModelAlreadyExistsError):
            populated_domain_repo.add(existing_data_model)
            assert False, f"Should have raised a DomainRepositoryExistsError for schema_name: {existing_data_model['schema_name']}"

    def test_add_invalid_metamodel_that_does_not_exist(self, populated_domain_repo, invalid_property_models):
        invalid_model = invalid_property_models[0].copy()
        new_name = 'invalid_model'
        invalid_model['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        with pytest.raises(ValidationError):
            populated_domain_repo.add(invalid_model)
            assert False, f"Should have raised a ValidationError for schema_name: {new_name}"

    def test_add_invalid_property_model_that_does_not_exist(self, populated_domain_repo, invalid_data_models):
        invalid_model = invalid_data_models[0].copy()
        new_name = 'invalid_model'
        invalid_model['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        with pytest.raises(ValidationError):
            populated_domain_repo.add(invalid_model)
            assert False, f"Should have raised a ValidationError for schema_name: {new_name}"

    def test_add_invalid_data_model_that_does_not_exist(self, populated_domain_repo, invalid_data_models):
        invalid_model = invalid_data_models[0].copy()
        new_name = 'invalid_model'
        invalid_model['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        with pytest.raises(ValidationError):
            populated_domain_repo.add(invalid_model)
            assert False, f"Should have raised a ValidationError for schema_name: {new_name}"

    @pytest.mark.parametrize("bad_model", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_add_model_with_bad_model(self, populated_domain_repo, bad_model):
        with pytest.raises(DomainRepositoryTypeError):
            populated_domain_repo.add(bad_model)
            assert False, f"Should have raised a TypeError for model: {bad_model}"

    # remove tests (test all expected behaviors of remove())
    # ------------------------------------------------------
    # Category 1: remove a model that exists
    # Test 1.1: remove a metamodel that exists; check they no longer exist; check get returns None
    # Test 1.2: remove a property model that exists; check they no longer exist; check get returns None
    # Test 1.3: remove a data model that exists; check they no longer exist; check get returns None
    # Category 2: remove a model that does not exist
    # Test 2.1: remove a model that does not exist (error)
    # Category 3: remove a model that exists but is invalid
    # Test 3.1: iterate over all invalid schemas from conftest; check that they no longer exist; check get returns None
    # Category 4: bad arguments
    # Test 4.1: remove a model with a bad schema_name argument (error)

    @pytest.mark.parametrize("schema_name", ['record_metamodel', 'xarray_dataarray_metamodel'])
    def test_remove_metamodel_that_exists(self, populated_domain_repo, schema_name):
        assert populated_domain_repo.exists(schema_name)
        populated_domain_repo.remove(schema_name)
        assert not populated_domain_repo.exists(schema_name)
        assert populated_domain_repo.get(schema_name) is None

    @pytest.mark.parametrize("schema_name", ['unit_of_measure', 'dimension_of_measure', 'time_of_save', 'time_of_removal'])
    def test_remove_property_model_that_exists(self, populated_domain_repo, schema_name):
        assert populated_domain_repo.exists(schema_name)
        populated_domain_repo.remove(schema_name)
        assert not populated_domain_repo.exists(schema_name)
        assert populated_domain_repo.get(schema_name) is None

    @pytest.mark.parametrize("schema_name", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_remove_data_model_that_exists(self, populated_domain_repo, schema_name):
        assert populated_domain_repo.exists(schema_name)
        populated_domain_repo.remove(schema_name)
        assert not populated_domain_repo.exists(schema_name)
        assert populated_domain_repo.get(schema_name) is None

    def test_remove_model_that_does_not_exist(self, populated_domain_repo):
        with pytest.raises(DomainRepositoryModelNotFoundError):
            populated_domain_repo.remove('does_not_exist')
            assert False, f"Should have raised a DomainRepositoryDoesNotExistError for schema_name: does_not_exist"

    @pytest.mark.parametrize("invalid_name_idx", range(17))
    def test_remove_invalid_model_that_exists(self, populated_domain_repo, invalid_model_schema_names, invalid_name_idx):
        schema_name = invalid_model_schema_names[invalid_name_idx]
        assert populated_domain_repo.exists(schema_name)
        populated_domain_repo.remove(schema_name)
        assert not populated_domain_repo.exists(schema_name)
        assert populated_domain_repo.get(schema_name) is None

    @pytest.mark.parametrize("bad_schema_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c"), {"hash": "map"}])
    def test_remove_model_with_bad_schema_name(self, populated_domain_repo, bad_schema_name):
        with pytest.raises(DomainRepositoryTypeError):
            populated_domain_repo.remove(bad_schema_name)
            assert False, f"Should have raised a TypeError for schema_name: {bad_schema_name}"

    # undo tests (test all expected behaviors of undo())
    # --------------------------------------------------
    # Category 1: undo add
    # Test 1.1: undo adding a new model
    # Test 1.2: undo all the adding that was done on initialization of the populated_domain_repo fixture
    # Category 2: undo remove
    # Test 2.1: undo removing a model
    # Category 3: undo when there is nothing to undo
    # Test 3.1: undo when there is nothing to undo returns None

    def test_undo_add(self, populated_domain_repo, metamodels):
        new_metamodel = metamodels[0].copy()
        new_name = 'new_metamodel'
        new_metamodel['schema_name'] = new_name
        assert not populated_domain_repo.exists(new_name)
        n_ops = len(populated_domain_repo._operation_history)
        populated_domain_repo.add(new_metamodel)
        assert len(populated_domain_repo._operation_history) == n_ops + 1
        assert populated_domain_repo.exists(new_name)
        result = populated_domain_repo.undo()
        assert result.schema_name == new_name
        assert not populated_domain_repo.exists(new_name)
        assert len(populated_domain_repo._operation_history) == n_ops

    def test_undo_all_adds(self, empty_domain_repo, metamodels, property_models, data_models):
        for model_type in [metamodels, property_models, data_models]:
            for model in model_type:
                empty_domain_repo.add(model)
                assert empty_domain_repo.exists(model['schema_name'])
        assert len(empty_domain_repo._operation_history) == len(metamodels) + len(property_models) + len(data_models)
        results = empty_domain_repo.undo_all()
        assert len(results) == len(metamodels) + len(property_models) + len(data_models)
        assert len(empty_domain_repo._operation_history) == 0
        for model_type in [metamodels, property_models, data_models]:
            for model in model_type:
                assert not empty_domain_repo.exists(model['schema_name'])

    def test_undo_remove(self, populated_domain_repo):
        schema_name = 'record_metamodel'
        assert populated_domain_repo.exists(schema_name)
        n_ops = len(populated_domain_repo._operation_history)
        populated_domain_repo.remove(schema_name)
        assert len(populated_domain_repo._operation_history) == n_ops + 1
        assert not populated_domain_repo.exists(schema_name)
        result = populated_domain_repo.undo()
        assert result.schema_name == schema_name
        assert populated_domain_repo.exists(schema_name)
        assert len(populated_domain_repo._operation_history) == n_ops

    def test_undo_all_remove(self, populated_valid_only_domain_repo, metamodels, property_models, data_models):
        for model_type in [metamodels, property_models, data_models]:
            for model in model_type:
                populated_valid_only_domain_repo.remove(model['schema_name'])
                assert not populated_valid_only_domain_repo.exists(model['schema_name'])
        assert len(populated_valid_only_domain_repo._operation_history) == len(metamodels) + len(property_models) + len(data_models)
        results = populated_valid_only_domain_repo.undo_all()
        assert len(results) == len(metamodels) + len(property_models) + len(data_models)
        assert len(populated_valid_only_domain_repo._operation_history) == 0

    def test_undo_when_nothing_to_undo(self, populated_domain_repo):
        n_ops = len(populated_domain_repo._operation_history)
        populated_domain_repo.undo()
        assert len(populated_domain_repo._operation_history) == n_ops

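    # --- Illustrative sketch (editorial addition, not part of the packaged file) ---
    # The undo tests above assume a repository that records every add/remove on an
    # in-memory `_operation_history` stack and can reverse the most recent entry.
    # A minimal sketch of that pattern, using hypothetical names only, might be:
    class _UndoableRepoSketch:
        def __init__(self):
            self._models = {}
            self._operation_history = []  # stack of ("add" | "remove", model) pairs

        def add(self, model):
            self._models[model['schema_name']] = model
            self._operation_history.append(("add", model))

        def remove(self, schema_name):
            model = self._models.pop(schema_name)
            self._operation_history.append(("remove", model))

        def undo(self):
            if not self._operation_history:
                return None  # nothing to undo; history stays unchanged
            from types import SimpleNamespace  # local import keeps the sketch self-contained
            operation, model = self._operation_history.pop()
            if operation == "add":
                self._models.pop(model['schema_name'], None)   # reverse the add
            else:
                self._models[model['schema_name']] = model     # reverse the remove
            return SimpleNamespace(schema_name=model['schema_name'])

        def undo_all(self):
            results = []
            while self._operation_history:
                results.append(self.undo())
            return results
    # --- end of sketch --------------------------------------------------------------
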
    # clear operation history tests (test all expected behaviors of clear_operation_history())
    # no tests because this is just a one-liner that sets the operation history to an empty list

    # list_marked_for_deletion tests (test all expected behaviors of list_marked_for_deletion())
    # No tests because this passes through to the DAO and is tested there

    # purge tests (test all expected behaviors of purge())
    # no tests because this passes through to the DAO and is tested there

    # validate tests (test all expected behaviors of validate())
    # ----------------------------------------------------------
    # Category 1: validate a model that is valid
    # Test 1.1: validate a metamodel that is valid
    # Test 1.2: validate a property model that is valid
    # Test 1.3: validate a data model that is valid
    # Category 2: validate a model that is invalid (all the ways it can be invalid)
    # validation requirements
    # - schema_name must be a string
    # - schema_name must not be empty
    # - schema_name must not contain spaces
    # - schema_name must not contain upper case characters
    # - schema_name must contain alphanumeric characters and underscores only
    # - schema_name must not contain double underscores
    # - schema_name must not contain leading or trailing underscores
    # - schema_name must not contain leading numbers
    # - schema_name must not contain the substring 'time_of_removal' because it is used in filenames in the file dao
    # - schema_type must be one of 'metamodel', 'property_model', 'data_model'
    # - metamodel_ref must be a string
    # - metamodel_ref must be a valid schema_name
    # - metamodel_ref must be an existing valid metamodel
    # - schema_description must be a string
    # - schema_description must not be empty
    # - schema_description must not contain leading or trailing spaces
    # - json_schema must be a dict
    # - json_schema must not be empty
    # - json_schema must contain a 'type' key
    # - if the 'model_type' is 'metamodel' or 'data_model' then json_schema['type'] must be the string 'object'
    # - json_schema must be a generally valid json schema
    # - if schema_type is 'data_model' then it must contain a 'metamodel_ref' key
    # - schema_title cannot be empty
    # - schema_title can only contain alphanumeric characters and spaces
    # - schema_title should have capital letters at the start of each word
    # Test 2.1: validate a model that is invalid because schema_name is not a string
    # Test 2.2: validate a model that is invalid because schema_name is empty
    # Test 2.3: validate a model that is invalid because schema_name contains spaces
    # Test 2.4: validate a model that is invalid because schema_name contains uppercase characters
    # Test 2.5: validate a model that is invalid because schema_name contains other invalid characters
    # Test 2.6: validate a model that is invalid because schema_name contains double underscores
    # Test 2.7: validate a model that is invalid because schema_name contains leading or trailing underscores
    # Test 2.8: validate a model that is invalid because schema_name contains leading numbers
    # Test 2.9: validate a model that is invalid because schema_name contains the substring 'time_of_removal'
    # Test 2.10: validate a model that is invalid because schema_type is not one of 'metamodel', 'property_model', 'data_model'
    # Test 2.11: validate a model that is invalid because metamodel_ref is not a string
    # Test 2.12: validate a model that is invalid because metamodel_ref is not a valid schema_name
    # Test 2.13: validate a model that is invalid because metamodel_ref is not an existing valid metamodel
    # Test 2.14: validate a model that is invalid because schema_description is not a string
    # Test 2.15: validate a model that is invalid because schema_description is empty
    # Test 2.16: validate a model that is invalid because schema_description contains leading or trailing spaces
    # Test 2.17: validate a model that is invalid because json_schema is not a dict
    # Test 2.18: validate a model that is invalid because json_schema is empty
    # Test 2.19: validate a model that is invalid because json_schema does not contain a 'type' key
    # Test 2.20: validate a data model that is invalid because json_schema['type'] is not the string 'object'
    # Test 2.21: validate a metamodel that is invalid because json_schema['type'] is not the string 'object'
    # Test 2.22: validate a model that is invalid because json_schema is not a generally valid json schema
    # Test 2.23: validate a model that is invalid because schema_type is 'data_model' but it does not contain a 'metamodel_ref' key
    # Category 3: bad arguments
    # Test 3.1: validate a model with a bad model argument (error)

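    # --- Illustrative sketch (editorial addition, not part of the packaged file) ---
    # Taken together, the schema_name requirements listed above reduce to one pattern
    # check plus a reserved-substring check. A hypothetical helper condensing those
    # rules (assumed here for illustration; not the repository's actual validator):
    @staticmethod
    def _schema_name_satisfies_listed_rules(name):
        import re  # local import keeps the sketch self-contained
        if not isinstance(name, str):
            return False
        if 'time_of_removal' in name:  # reserved: used in filenames by the file DAO
            return False
        # lowercase alphanumerics joined by single underscores: this rules out empty
        # names, spaces, uppercase letters, double underscores, leading/trailing
        # underscores, and names that start with a digit
        return re.fullmatch(r"[a-z][a-z0-9]*(?:_[a-z0-9]+)*", name) is not None
    # --- end of sketch --------------------------------------------------------------
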
@pytest.mark.parametrize("schema_name", ['record_metamodel', 'xarray_dataarray_metamodel'])
|
|
449
|
+
def test_validate_metamodel_that_is_valid(self, populated_domain_repo, schema_name):
|
|
450
|
+
metamodel = populated_domain_repo.get(schema_name)
|
|
451
|
+
assert metamodel is not None
|
|
452
|
+
populated_domain_repo._validate(metamodel)
|
|
453
|
+
|
|
454
|
+
@pytest.mark.parametrize("schema_name", ['unit_of_measure', 'dimension_of_measure', 'time_of_save', 'time_of_removal'])
|
|
455
|
+
def test_validate_property_model_that_is_valid(self, populated_domain_repo, schema_name):
|
|
456
|
+
property_model = populated_domain_repo.get(schema_name)
|
|
457
|
+
assert property_model is not None
|
|
458
|
+
populated_domain_repo._validate(property_model)
|
|
459
|
+
|
|
460
|
+
@pytest.mark.parametrize("schema_name", ['animal', 'session', 'spike_times', 'spike_waveforms'])
|
|
461
|
+
def test_validate_data_model_that_is_valid(self, populated_domain_repo, schema_name):
|
|
462
|
+
data_model = populated_domain_repo.get(schema_name)
|
|
463
|
+
assert data_model is not None
|
|
464
|
+
populated_domain_repo._validate(data_model)
|
|
465
|
+
|
|
466
|
+
|
|
467
|
+
@pytest.mark.parametrize("bad_type_value", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
468
|
+
def test_validate_model_that_is_invalid_because_schema_name_is_not_a_string(self, populated_domain_repo, models, bad_type_value):
|
|
469
|
+
# get the number of data models
|
|
470
|
+
n = len(models)
|
|
471
|
+
# generate a random number between 0 and n-1
|
|
472
|
+
idx = np.random.randint(0, n-1)
|
|
473
|
+
# get a valid data model
|
|
474
|
+
model = models[idx].copy()
|
|
475
|
+
# validate model before changing it
|
|
476
|
+
populated_domain_repo._validate(model)
|
|
477
|
+
# change schema_name to be an int
|
|
478
|
+
model['schema_name'] = bad_type_value
|
|
479
|
+
with pytest.raises(ValidationError):
|
|
480
|
+
populated_domain_repo._validate(model)
|
|
481
|
+
assert False, f"Should have raised a ValidationError for schema_name: 1"
|
|
482
|
+
|
|
483
|
+
@pytest.mark.parametrize("bad_value", ['', ' ', ' ', '5tarts_with_number', 'double___underscore', 'contains-dash', 'contains space', 'has_non_@alpha_num*ric_chars', 'hasCapitalLetters', '_leading_underscore', 'trailing_underscore_', '_starting_underscore'])
|
|
484
|
+
def test_validate_model_that_is_invalid_because_schema_name_is_invalid(self, populated_domain_repo, models, bad_value):
|
|
485
|
+
# get the number of data models
|
|
486
|
+
n = len(models)
|
|
487
|
+
# generate a random number between 0 and n-1
|
|
488
|
+
idx = np.random.randint(0, n-1)
|
|
489
|
+
# get a valid data model
|
|
490
|
+
data_model = models[idx].copy()
|
|
491
|
+
# validate model before changing it
|
|
492
|
+
populated_domain_repo._validate(data_model)
|
|
493
|
+
# change schema_name to be an int
|
|
494
|
+
data_model['schema_name'] = bad_value
|
|
495
|
+
with pytest.raises(ValidationError):
|
|
496
|
+
populated_domain_repo._validate(data_model)
|
|
497
|
+
assert False, f"Should have raised a ValidationError for schema_name: {bad_value}"
|
|
498
|
+
|
|
499
|
+
@pytest.mark.parametrize("bad_value", ['not_a_valid_schema_type', 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
500
|
+
def test_validate_model_that_is_invalid_because_schema_type_is_invalid(self, populated_domain_repo, models, bad_value):
|
|
501
|
+
# get the number of data models
|
|
502
|
+
n = len(models)
|
|
503
|
+
# generate a random number between 0 and n-1
|
|
504
|
+
idx = np.random.randint(0, n-1)
|
|
505
|
+
# get a valid data model
|
|
506
|
+
data_model = models[idx].copy()
|
|
507
|
+
# validate model before changing it
|
|
508
|
+
populated_domain_repo._validate(data_model)
|
|
509
|
+
# change schema_name to be an int
|
|
510
|
+
data_model['schema_type'] = bad_value
|
|
511
|
+
with pytest.raises(ValidationError):
|
|
512
|
+
populated_domain_repo._validate(data_model)
|
|
513
|
+
assert False, f"Should have raised a ValidationError for schema_type: {bad_value}"
|
|
514
|
+
|
|
515
|
+
@pytest.mark.parametrize("bad_value", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
516
|
+
def test_validate_model_that_is_invalid_because_metamodel_ref_is_not_a_string(self, populated_domain_repo, models, bad_value):
|
|
517
|
+
# get the number of data models
|
|
518
|
+
n = len(models)
|
|
519
|
+
# generate a random number between 0 and n-1
|
|
520
|
+
idx = np.random.randint(0, n-1)
|
|
521
|
+
# get a valid data model
|
|
522
|
+
data_model = models[idx].copy()
|
|
523
|
+
# validate model before changing it
|
|
524
|
+
populated_domain_repo._validate(data_model)
|
|
525
|
+
# change schema_name to be an int
|
|
526
|
+
data_model['metamodel_ref'] = bad_value
|
|
527
|
+
with pytest.raises(ValidationError):
|
|
528
|
+
populated_domain_repo._validate(data_model)
|
|
529
|
+
assert False, f"Should have raised a ValidationError for metamodel_ref: {bad_value}"
|
|
530
|
+
|
|
531
|
+
@pytest.mark.parametrize("bad_value", ['', ' ', ' ', '5tarts_with_number', 'double___underscore', 'contains-dash', 'contains space', 'has_non_@alpha_num*ric_chars', 'hasCapitalLetters', '_leading_underscore', 'trailing_underscore_', '_starting_underscore'])
|
|
532
|
+
def test_validate_model_that_is_invalid_because_metamodel_ref_is_invalid(self, populated_domain_repo, models, bad_value):
|
|
533
|
+
# get the number of data models
|
|
534
|
+
n = len(models)
|
|
535
|
+
# generate a random number between 0 and n-1
|
|
536
|
+
idx = np.random.randint(0, n-1)
|
|
537
|
+
# get a valid data model
|
|
538
|
+
data_model = models[idx].copy()
|
|
539
|
+
# validate model before changing it
|
|
540
|
+
populated_domain_repo._validate(data_model)
|
|
541
|
+
# change schema_name to be an int
|
|
542
|
+
data_model['metamodel_ref'] = bad_value
|
|
543
|
+
with pytest.raises(ValidationError):
|
|
544
|
+
populated_domain_repo._validate(data_model)
|
|
545
|
+
assert False, f"Should have raised a ValidationError for metamodel_ref: {bad_value}"
|
|
546
|
+
|
|
547
|
+
@pytest.mark.parametrize("not_existing_ref", ["does_not_exist", "not_a_valid_schema_name"])
|
|
548
|
+
def test_validate_model_that_is_invalid_because_metamodel_ref_does_not_exist(self, populated_domain_repo, models, not_existing_ref):
|
|
549
|
+
# get the number of data models
|
|
550
|
+
n = len(models)
|
|
551
|
+
# generate a random number between 0 and n-1
|
|
552
|
+
idx = np.random.randint(0, n-1)
|
|
553
|
+
# get a valid data model
|
|
554
|
+
data_model = models[idx].copy()
|
|
555
|
+
# validate model before changing it
|
|
556
|
+
populated_domain_repo._validate(data_model)
|
|
557
|
+
# change schema_name to be an int
|
|
558
|
+
data_model['metamodel_ref'] = not_existing_ref
|
|
559
|
+
with pytest.raises(ValidationError):
|
|
560
|
+
populated_domain_repo._validate(data_model)
|
|
561
|
+
assert False, f"Should have raised a ValidationError for metamodel_ref: {not_existing_ref}"
|
|
562
|
+
|
|
563
|
+
@pytest.mark.parametrize("bad_value", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
564
|
+
def test_validate_model_that_is_invalid_because_schema_description_is_not_a_string(self, populated_domain_repo, models, bad_value):
|
|
565
|
+
# get the number of data models
|
|
566
|
+
n = len(models)
|
|
567
|
+
# generate a random number between 0 and n-1
|
|
568
|
+
idx = np.random.randint(0, n-1)
|
|
569
|
+
# get a valid data model
|
|
570
|
+
data_model = models[idx].copy()
|
|
571
|
+
# validate model before changing it
|
|
572
|
+
populated_domain_repo._validate(data_model)
|
|
573
|
+
# change schema_description to be an int
|
|
574
|
+
data_model['schema_description'] = bad_value
|
|
575
|
+
with pytest.raises(ValidationError):
|
|
576
|
+
populated_domain_repo._validate(data_model)
|
|
577
|
+
assert False, f"Should have raised a ValidationError for schema_description: {bad_value}"
|
|
578
|
+
|
|
579
|
+
@pytest.mark.parametrize("bad_value", ['', ' ', ' '])
|
|
580
|
+
def test_validate_model_that_is_invalid_because_schema_description_is_empty(self, populated_domain_repo, models, bad_value):
|
|
581
|
+
# get the number of data models
|
|
582
|
+
n = len(models)
|
|
583
|
+
# generate a random number between 0 and n-1
|
|
584
|
+
idx = np.random.randint(0, n-1)
|
|
585
|
+
# get a valid data model
|
|
586
|
+
data_model = models[idx].copy()
|
|
587
|
+
# validate model before changing it
|
|
588
|
+
populated_domain_repo._validate(data_model)
|
|
589
|
+
# change schema_description to be an int
|
|
590
|
+
data_model['schema_description'] = bad_value
|
|
591
|
+
with pytest.raises(ValidationError):
|
|
592
|
+
populated_domain_repo._validate(data_model)
|
|
593
|
+
assert False, f"Should have raised a ValidationError for schema_description: {bad_value}"
|
|
594
|
+
|
|
595
|
+
@pytest.mark.parametrize("bad_value", [' starts_with_space', 'ends_with_space ', ' has_space '])
|
|
596
|
+
def test_validate_model_that_is_invalid_because_schema_description_contains_leading_or_trailing_spaces(self, populated_domain_repo, models, bad_value):
|
|
597
|
+
# get the number of data models
|
|
598
|
+
n = len(models)
|
|
599
|
+
# generate a random number between 0 and n-1
|
|
600
|
+
idx = np.random.randint(0, n-1)
|
|
601
|
+
# get a valid data model
|
|
602
|
+
data_model = models[idx].copy()
|
|
603
|
+
data_model['schema_description'] = bad_value
|
|
604
|
+
with pytest.raises(ValidationError):
|
|
605
|
+
populated_domain_repo._validate(data_model)
|
|
606
|
+
assert False, f"Should have raised a ValidationError for schema_description: {bad_value}"
|
|
607
|
+
|
|
608
|
+
@pytest.mark.parametrize("bad_value", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
609
|
+
def test_validate_model_that_is_invalid_because_json_schema_is_not_a_dict(self, populated_domain_repo, models, bad_value):
|
|
610
|
+
# get the number of data models
|
|
611
|
+
n = len(models)
|
|
612
|
+
# generate a random number between 0 and n-1
|
|
613
|
+
idx = 0
|
|
614
|
+
# get a valid data model
|
|
615
|
+
data_model = models[idx].copy()
|
|
616
|
+
# validate model before changing it
|
|
617
|
+
populated_domain_repo._validate(data_model)
|
|
618
|
+
# change json_schema to be an int
|
|
619
|
+
data_model['json_schema'] = bad_value
|
|
620
|
+
with pytest.raises(ValidationError):
|
|
621
|
+
populated_domain_repo._validate(data_model)
|
|
622
|
+
assert False, f"Should have raised a ValidationError for json_schema: {bad_value}"
|
|
623
|
+
|
|
624
|
+
def test_validate_model_that_is_invalid_because_json_schema_does_not_have_type_key(self, populated_domain_repo, models):
|
|
625
|
+
# get the number of data models
|
|
626
|
+
n = len(models)
|
|
627
|
+
# generate a random number between 0 and n-1
|
|
628
|
+
idx = 0
|
|
629
|
+
# get a valid data model
|
|
630
|
+
data_model = models[idx].copy()
|
|
631
|
+
data_model['json_schema'] = data_model['json_schema'].copy()
|
|
632
|
+
# validate model before changing it
|
|
633
|
+
populated_domain_repo._validate(data_model)
|
|
634
|
+
# remove the type key from json_schema
|
|
635
|
+
del data_model['json_schema']['type']
|
|
636
|
+
with pytest.raises(ValidationError):
|
|
637
|
+
populated_domain_repo._validate(data_model)
|
|
638
|
+
assert False, f"Should have raised a ValidationError for json_schema: {data_model['json_schema']}"
|
|
639
|
+
|
|
640
|
+
@pytest.mark.parametrize("non_object_json_type_names", ['string', 'number', 'integer', 'boolean', 'array', 'null'])
|
|
641
|
+
def test_validate_data_model_that_is_invalid_because_json_schema_type_is_not_object(self, populated_domain_repo, data_models, non_object_json_type_names):
|
|
642
|
+
# get the number of data models
|
|
643
|
+
n = len(data_models)
|
|
644
|
+
# generate a random number between 0 and n-1
|
|
645
|
+
idx = np.random.randint(0, n-1)
|
|
646
|
+
# get a valid data model
|
|
647
|
+
data_model = data_models[idx].copy()
|
|
648
|
+
data_model['json_schema'] = data_model['json_schema'].copy()
|
|
649
|
+
# validate before changing the model
|
|
650
|
+
populated_domain_repo._validate(data_model)
|
|
651
|
+
# change json_schema to be an int
|
|
652
|
+
data_model['json_schema']['type'] = non_object_json_type_names
|
|
653
|
+
with pytest.raises(DomainRepositoryValidationError):
|
|
654
|
+
populated_domain_repo._validate(data_model)
|
|
655
|
+
assert False, f"Should have raised a ValidationError for json_schema: {data_model['json_schema']}"
|
|
656
|
+
|
|
657
|
+
@pytest.mark.parametrize("non_object_json_type_names", ['string', 'number', 'integer', 'boolean', 'array', 'null'])
|
|
658
|
+
def test_validate_metamodel_that_is_invalid_because_json_schema_type_is_not_object(self, populated_domain_repo, metamodels, non_object_json_type_names):
|
|
659
|
+
# get the number of data models
|
|
660
|
+
n = len(metamodels)
|
|
661
|
+
# generate a random number between 0 and n-1
|
|
662
|
+
idx = np.random.randint(0, n-1)
|
|
663
|
+
# get a valid data model
|
|
664
|
+
metamodel = metamodels[idx].copy()
|
|
665
|
+
metamodel['json_schema'] = metamodel['json_schema'].copy()
|
|
666
|
+
# validate before changing the model
|
|
667
|
+
populated_domain_repo._validate(metamodel)
|
|
668
|
+
# change json_schema to be an int
|
|
669
|
+
metamodel['json_schema']['type'] = non_object_json_type_names
|
|
670
|
+
with pytest.raises(DomainRepositoryValidationError):
|
|
671
|
+
populated_domain_repo._validate(metamodel)
|
|
672
|
+
assert False, f"Should have raised a ValidationError for json_schema: {metamodel['json_schema']}"
|
|
673
|
+
|
|
674
|
+
|
|
675
|
+
@pytest.mark.parametrize("bad_value", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
676
|
+
def test_validate_model_that_is_invalid_because_json_schema_is_not_a_valid_json_schema(self, populated_domain_repo, models, bad_value):
|
|
677
|
+
# get the number of data models
|
|
678
|
+
n = len(models)
|
|
679
|
+
# generate a random number between 0 and n-1
|
|
680
|
+
idx = 0
|
|
681
|
+
# get a valid data model
|
|
682
|
+
model = models[idx].copy()
|
|
683
|
+
model['json_schema'] = model['json_schema'].copy()
|
|
684
|
+
# validate model before changing it
|
|
685
|
+
populated_domain_repo._validate(model)
|
|
686
|
+
# change json_schema to be an int
|
|
687
|
+
model['json_schema'] = bad_value
|
|
688
|
+
with pytest.raises(DomainRepositoryValidationError):
|
|
689
|
+
populated_domain_repo._validate(model)
|
|
690
|
+
assert False, f"Should have raised a ValidationError for json_schema: {bad_value}"
|
|
691
|
+
|
|
692
|
+
def test_validate_data_model_that_is_invalid_because_json_schema_does_not_contain_metamodel_ref(self, populated_domain_repo, data_models):
|
|
693
|
+
# get the number of data models
|
|
694
|
+
n = len(data_models)
|
|
695
|
+
# generate a random number between 0 and n-1
|
|
696
|
+
idx = 0
|
|
697
|
+
# get a valid data model
|
|
698
|
+
data_model = data_models[idx].copy()
|
|
699
|
+
# validate before removing the key
|
|
700
|
+
populated_domain_repo._validate(data_model)
|
|
701
|
+
# remove the metamodel_ref key from json_schema
|
|
702
|
+
del data_model['metamodel_ref']
|
|
703
|
+
with pytest.raises(ValidationError):
|
|
704
|
+
populated_domain_repo._validate(data_model)
|
|
705
|
+
assert False, f"Should have raised a ValidationError for json_schema: {data_model['json_schema']}"
|
|
706
|
+
|
|
707
|
+
@pytest.mark.parametrize("bad_value", ['', ' ', ' ', "Has Invalid Ch@racters", "has-dashes", "has_underscores", "has\nnew\nlines"])
|
|
708
|
+
def test_validate_model_that_is_invalid_because_schema_title_is_invalid(self, populated_domain_repo, models, bad_value):
|
|
709
|
+
# get the number of data models
|
|
710
|
+
n = len(models)
|
|
711
|
+
# generate a random number between 0 and n-1
|
|
712
|
+
idx = 0
|
|
713
|
+
# get a valid data model
|
|
714
|
+
data_model = models[idx].copy()
|
|
715
|
+
# validate model before changing it
|
|
716
|
+
populated_domain_repo._validate(data_model)
|
|
717
|
+
# change schema_title to be an int
|
|
718
|
+
data_model['schema_title'] = bad_value
|
|
719
|
+
with pytest.raises(ValidationError):
|
|
720
|
+
populated_domain_repo._validate(data_model)
|
|
721
|
+
assert False, f"Should have raised a ValidationError for schema_title: {bad_value}"
|
|
722
|
+
|
|
723
|
+
@pytest.mark.parametrize("valid_title", ['Valid Title', 'valid lowercase title', 'Valid Title With Numbers 123'])
|
|
724
|
+
def test_valid_title(self, populated_domain_repo, models, valid_title):
|
|
725
|
+
# get the number of data models
|
|
726
|
+
n = len(models)
|
|
727
|
+
# generate a random number between 0 and n-1
|
|
728
|
+
idx = 0
|
|
729
|
+
# get a valid data model
|
|
730
|
+
model = models[idx].copy()
|
|
731
|
+
# change schema_title to be an int
|
|
732
|
+
model['schema_title'] = valid_title
|
|
733
|
+
populated_domain_repo._validate(model)
|
|
734
|
+
|
|
735
|
+
|
|
736
|
+
class TestDataRepository:
|
|
737
|
+
|
|
738
|
+
# get tests (test all expected behaviors of get())
|
|
739
|
+
# ------------------------------------------------
|
|
740
|
+
# Category 1: get a data object that exists
|
|
741
|
+
# Test 1.1: get an unversioned record data object (has_file=False) that exists
|
|
742
|
+
# Test 1.1: get an unversioned data array object that exists (has_file=True)
|
|
743
|
+
# Test 1.2: get a versioned data object that exists with a specific version (has_file=True)
|
|
744
|
+
# Category 2: get a data object that exists but is invalid (error)
|
|
745
|
+
# Test2.1: get an unversioned record data object (has_file=False) that exists but is invalid (error)
|
|
746
|
+
# Category 3: get a data object that does not exis
|
|
747
|
+
# Test 3.1: get a data object that does not exist; check that it returns None
|
|
748
|
+
# Category 4: bad arguments
|
|
749
|
+
# Test 4.1: get a data object with a bad schema_ref argument (error)
|
|
750
|
+
# Test 4.2: get a data object with a bad data_name argument (error)
|
|
751
|
+
# Test 4.3: get a data object with a bad data_version argument (error)
|
|
752
|
+
|
|
753
|
+
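    # --- Illustrative sketch (editorial addition, not part of the packaged file) ---
    # The get/exists tests below address every object by the same triple of
    # (schema_ref, data_name, version_timestamp), with version_timestamp=0 standing
    # in for "unversioned". A hypothetical in-memory stand-in for that addressing
    # scheme (assumed here for illustration only):
    class _VersionedStoreSketch:
        def __init__(self):
            self._objects = {}  # keyed by (schema_ref, data_name, version_timestamp)

        def put(self, obj, schema_ref, data_name, version_timestamp=0):
            self._objects[(schema_ref, data_name, version_timestamp)] = obj

        def get(self, schema_ref, data_name, version_timestamp=0):
            # returns None for an unknown triple, mirroring the tests below
            return self._objects.get((schema_ref, data_name, version_timestamp))

        def exists(self, schema_ref, data_name, version_timestamp=0):
            return (schema_ref, data_name, version_timestamp) in self._objects
    # --- end of sketch --------------------------------------------------------------
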
@pytest.mark.parametrize("schema_ref", ['animal', 'session', 'spike_times', 'spike_waveforms'])
|
|
754
|
+
def test_get_unversioned_data_object_that_exists(self, populated_data_repo, schema_ref):
|
|
755
|
+
data_object = populated_data_repo.get(
|
|
756
|
+
schema_ref=schema_ref,
|
|
757
|
+
data_name="test",
|
|
758
|
+
version_timestamp=0
|
|
759
|
+
)
|
|
760
|
+
assert data_object is not None, f"Should have returned a data object for schema_ref: {schema_ref} and data_name: test"
|
|
761
|
+
if isinstance(data_object, dict):
|
|
762
|
+
assert data_object['schema_ref'] == schema_ref
|
|
763
|
+
assert data_object['data_name'] == "test"
|
|
764
|
+
assert data_object.get('version_timestamp') == 0
|
|
765
|
+
else:
|
|
766
|
+
assert data_object.attrs['schema_ref'] == schema_ref
|
|
767
|
+
assert data_object.attrs['data_name'] == "test"
|
|
768
|
+
assert data_object.attrs.get('version_timestamp') == 0
|
|
769
|
+
|
|
770
|
+
@pytest.mark.parametrize("time_delta", [s for s in range(1, 11)])
|
|
771
|
+
def test_get_versioned_data_object_that_exists(self, populated_data_repo, timestamp, time_delta, model_numpy_adapter):
|
|
772
|
+
vts = timestamp + timedelta(seconds=time_delta)
|
|
773
|
+
data_object = populated_data_repo.get(schema_ref='numpy_test', data_name="numpy_test", version_timestamp=vts, data_adapter=model_numpy_adapter)
|
|
774
|
+
assert data_object.attrs['schema_ref'] == 'numpy_test', f"Should have returned a data object with argument schema_ref: numpy_test (time_delta: {time_delta}), but got {data_object.attrs['schema_ref']}"
|
|
775
|
+
assert data_object.attrs['data_name'] == 'numpy_test', f"Should have returned a data object with argument data_name: numpy_test (time_delta: {time_delta}), but got {data_object.attrs['data_name']}"
|
|
776
|
+
assert data_object.attrs['version_timestamp'] == vts, f"Should have returned a data object with argument version_timestamp: {vts} (timestamp: {timestamp}, time_delta: {time_delta}), but got {data_object.attrs['version_timestamp']}"
|
|
777
|
+
|
|
778
|
+
@pytest.mark.parametrize("kwargs", [{'schema_ref': 'session', 'data_name': 'invalid_session_date'}, {'schema_ref': 'session', 'data_name': 'invalid_session_has_file'}])
|
|
779
|
+
def test_get_unversioned_record_that_exists_but_is_invalid(self, populated_data_repo_with_invalid_records, kwargs):
|
|
780
|
+
repo = populated_data_repo_with_invalid_records
|
|
781
|
+
with pytest.raises(DataRepositoryValidationError):
|
|
782
|
+
record = repo.get(**kwargs)
|
|
783
|
+
raise Exception(f"Should have raised a DataRepositoryValidationError for kwargs: {kwargs} for record: \n\n{record}")
|
|
784
|
+
|
|
785
|
+
def test_get_data_object_that_does_not_exist(self, populated_data_repo):
|
|
786
|
+
data_object = populated_data_repo.get(schema_ref='does_not_exist', data_name='does_not_exist', version_timestamp=0)
|
|
787
|
+
assert data_object is None, f"Should have returned None for schema_ref: does_not_exist and data_name: does_not_exist"
|
|
788
|
+
|
|
789
|
+
@pytest.mark.parametrize("bad_schema_ref", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
790
|
+
def test_get_data_object_with_bad_schema_ref(self, populated_data_repo, bad_schema_ref):
|
|
791
|
+
with pytest.raises(DataRepositoryTypeError):
|
|
792
|
+
populated_data_repo.get(schema_ref=bad_schema_ref, data_name='does_not_exist', version_timestamp=0)
|
|
793
|
+
assert False, f"Should have raised a TypeError for schema_ref: {bad_schema_ref}"
|
|
794
|
+
|
|
795
|
+
@pytest.mark.parametrize("bad_data_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
796
|
+
def test_get_data_object_with_bad_data_name(self, populated_data_repo, bad_data_name):
|
|
797
|
+
with pytest.raises(DataRepositoryTypeError):
|
|
798
|
+
populated_data_repo.get(schema_ref='does_not_exist', data_name=bad_data_name, version_timestamp=0)
|
|
799
|
+
assert False, f"Should have raised a TypeError for data_name: {bad_data_name}"
|
|
800
|
+
|
|
801
|
+
@pytest.mark.parametrize("bad_version_timestamp", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
|
|
802
|
+
def test_get_data_object_with_bad_version_timestamp(self, populated_data_repo, bad_version_timestamp):
|
|
803
|
+
with pytest.raises(Exception):
|
|
804
|
+
populated_data_repo.get(schema_ref='does_not_exist', data_name='does_not_exist', version_timestamp=bad_version_timestamp)
|
|
805
|
+
assert False, f"Should have raised an Exception for version_timestamp: {bad_version_timestamp}"
|
|
806
|
+
|
|
807
|
+
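    # Illustrative sketch (not part of the original suite): get() returns plain dicts for
    # record objects and xarray.DataArrays for file-backed objects, which is why the
    # assertions above branch on isinstance(). A hypothetical helper like the one below
    # could centralize that branching; the name is illustrative only and nothing here uses it.
    @staticmethod
    def _metadata_of(data_object):
        # Records expose metadata directly; DataArrays expose it through .attrs.
        return data_object if isinstance(data_object, dict) else data_object.attrs
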
    # exists tests (test all expected behaviors of exists())
    # ------------------------------------------------------
    # Category 1: exists a data object that exists
    # Test 1.1: exists an unversioned record data object (has_file=False) that exists
    # Test 1.2: exists an unversioned data array object that exists (has_file=True)
    # Test 1.3: exists a versioned data object that exists with a specific version (has_file=True)
    # Category 2: exists a data object that does not exist
    # Test 2.1: exists a data object that does not exist; check that it returns False
    # Category 3: bad arguments
    # Test 3.1: exists a data object with a bad schema_ref argument (error)
    # Test 3.2: exists a data object with a bad data_name argument (error)
    # Test 3.3: exists a data object with a bad data_version argument (error) (sketched below)

    @pytest.mark.parametrize("schema_ref", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_exists_unversioned_data_object_that_exists(self, populated_data_repo, schema_ref):
        assert populated_data_repo.exists(schema_ref=schema_ref, data_name="test", version_timestamp=0)

    @pytest.mark.parametrize("time_delta", [timedelta(seconds=s) for s in range(1, 11)])
    def test_exists_versioned_data_object_that_exists(self, populated_data_repo, model_numpy_adapter, timestamp, time_delta):
        vts = timestamp + time_delta
        assert populated_data_repo.exists(schema_ref='numpy_test', data_name="numpy_test", version_timestamp=vts)

    def test_exists_data_object_that_does_not_exist(self, populated_data_repo):
        assert not populated_data_repo.exists(schema_ref='does_not_exist', data_name='does_not_exist', version_timestamp=0)

    @pytest.mark.parametrize("bad_schema_ref", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_exists_data_object_with_bad_schema_ref(self, populated_data_repo, bad_schema_ref):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.exists(schema_ref=bad_schema_ref, data_name='does_not_exist', version_timestamp=0)
            assert False, f"Should have raised a TypeError for schema_ref: {bad_schema_ref}"

    @pytest.mark.parametrize("bad_data_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_exists_data_object_with_bad_data_name(self, populated_data_repo, bad_data_name):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.exists(schema_ref='does_not_exist', data_name=bad_data_name, version_timestamp=0)
            assert False, f"Should have raised a TypeError for data_name: {bad_data_name}"

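    # Sketch for the planned Test 3.3 above (bad data_version argument), which has no
    # implementation in this revision. It mirrors test_get_data_object_with_bad_version_timestamp
    # and assumes exists() validates version_timestamp the same way get() does; the expected
    # exception type is an assumption to adjust against the real behavior.
    @pytest.mark.parametrize("bad_version_timestamp", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_exists_data_object_with_bad_version_timestamp(self, populated_data_repo, bad_version_timestamp):
        with pytest.raises(Exception):
            populated_data_repo.exists(schema_ref='does_not_exist', data_name='does_not_exist', version_timestamp=bad_version_timestamp)
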
    # `find` tests (test all expected behaviors of find())
    # --------------------------------------------------
    # Category 1: find a data object that exists
    # Test 1.1: find unversioned record data objects (has_file=False) that exist
    # Test 1.2: find unversioned data array objects that exist (has_file=True)
    # Test 1.3: find all the numpy versioned data objects that exist
    # Test 1.4: find all the numpy versioned data objects that exist and have a version_timestamp that is greater than or equal to a specific timestamp
    # TODO: Test 1.5: find all numpy data with
    # Category 2: find a data object that does not exist
    # Test 2.1: find a data object that does not exist; check that it returns an empty list
    # Category 3: bad arguments
    # Test 3.1: find a data object with a bad filter argument (error) (sketched below)
    # Test 3.2: find a data object with a bad projection argument (error)
    # Test 3.3: find a data object with a bad sort argument (error)

    @pytest.mark.parametrize("schema_ref", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_find_unversioned_data_object_that_exists(self, populated_data_repo, schema_ref):
        query_filter = {'schema_ref': schema_ref, 'data_name': 'test', 'version_timestamp': 0}
        data_objects = populated_data_repo.find(filter=query_filter)
        assert len(data_objects) > 0
        for data_object in data_objects:
            if isinstance(data_object, dict):
                assert data_object['schema_ref'] == schema_ref
                assert data_object['data_name'] == "test"
                assert data_object.get('version_timestamp') == 0
            else:
                assert data_object.attrs['schema_ref'] == schema_ref
                assert data_object.attrs['data_name'] == "test"
                assert data_object.attrs.get('version_timestamp') == 0

    def test_find_versioned_data_object_that_exists(self, populated_data_repo):
        query_filter = {'schema_ref': 'numpy_test', 'data_name': 'numpy_test'}
        data_objects = populated_data_repo.find(filter=query_filter)
        assert len(data_objects) > 0
        for data_object in data_objects:
            assert data_object['schema_ref'] == 'numpy_test'
            assert data_object['data_name'] == "numpy_test"

    @pytest.mark.parametrize("time_delta", [timedelta(seconds=s) for s in range(1, 11)])
    def test_find_versioned_data_object_that_exists_with_timestamp_filter(self, populated_data_repo, timestamp, time_delta):
        vts = timestamp + time_delta
        query_filter = {'schema_ref': 'numpy_test', 'data_name': 'numpy_test', 'version_timestamp': {'$gte': vts}}
        data_objects = populated_data_repo.find(filter=query_filter)
        assert len(data_objects) > 0
        for data_object in data_objects:
            assert data_object['schema_ref'] == 'numpy_test'
            assert data_object['data_name'] == "numpy_test"
            assert data_object['version_timestamp'] >= vts, f"version_timestamp: {data_object['version_timestamp']} is not greater than or equal to the filter threshold: {vts} (base timestamp: {timestamp})"


    def test_find_data_object_that_does_not_exist(self, populated_data_repo):
        query_filter = {'schema_ref': 'does_not_exist', 'data_name': 'does_not_exist'}
        data_objects = populated_data_repo.find(filter=query_filter)
        assert len(data_objects) == 0

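    # Sketch for the planned Category 3 tests above (bad filter / projection / sort arguments),
    # which have no implementation in this revision. Only the bad-filter case is sketched; the
    # expected exception type (DataRepositoryTypeError) is an assumption that mirrors the other
    # bad-argument tests in this class, and the projection/sort cases would follow the same shape.
    @pytest.mark.parametrize("bad_filter", [1, 1.0, "not_a_dict", [1,2,3], ("a", "b")])
    def test_find_data_object_with_bad_filter(self, populated_data_repo, bad_filter):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.find(filter=bad_filter)
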
    # add tests (test all expected behaviors of add())
    # ------------------------------------------------
    # Category 1: add a data object that is valid
    # Test 1.1: add a valid unversioned record data object (has_file=False)
    # Test 1.2: add a valid unversioned data array object (has_file=True)
    # Test 1.3: add a valid versioned data object
    # Test 1.4: add a file data object that has no has_file attribute (it should be added and set to True)
    # Test 1.5: add a record data object that has no has_file attribute (it should be added and set to False)
    # Category 2: add a data object that is invalid (error)
    # Test 2.1: add an invalid unversioned record data object (has_file=False)
    # Test 2.2: add an invalid unversioned data array object (has_file=True)
    # Test 2.3: add an invalid versioned data object
    # Test 2.4: add a file data object with has_file=False (error)
    # Test 2.5: add a record data object with has_file=True (error)
    # Category 3: bad arguments
    # Test 3.1: add a data object with a bad data_object argument (error)
    # Test 3.2: add a data object with a bad schema_ref argument (error)
    # Test 3.3: add a data object with a bad data_name argument (error)
    # Test 3.4: add a data object with a bad data_version argument (error)
    # Test 3.5: add a data object with a bad data_adapter argument (error) (sketched below)

    @pytest.mark.parametrize("schema_ref", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_add_unversioned_data_object_that_is_valid(self, populated_data_repo, schema_ref):
        data_object = populated_data_repo.get(schema_ref=schema_ref, data_name="test", version_timestamp=0)
        if isinstance(data_object, dict):
            data_object['data_name'] = 'test_add'
        elif isinstance(data_object, xr.DataArray):
            data_object.attrs['data_name'] = 'test_add'
        populated_data_repo.add(data_object, versioning_on=False)
        # get the data object that was just added
        data_object = populated_data_repo.get(schema_ref=schema_ref, data_name="test_add", version_timestamp=0)
        assert data_object is not None
        assert populated_data_repo.exists(schema_ref=schema_ref, data_name="test_add", version_timestamp=0)


    @pytest.mark.parametrize("schema_ref", ['animal', 'session', 'spike_times', 'spike_waveforms'])
    def test_add_versioned_data_object_that_is_valid(self, populated_data_repo, schema_ref, timestamp):
        data_object = populated_data_repo.get(schema_ref=schema_ref, data_name="test", version_timestamp=0)
        if isinstance(data_object, dict):
            if 'version_timestamp' in data_object:
                del data_object['version_timestamp']
        else:
            if 'version_timestamp' in data_object.attrs:
                del data_object.attrs['version_timestamp']
        populated_data_repo.add(data_object, versioning_on=True)
        sleep(0.001)

    def test_add_file_data_object_that_has_no_has_file_attribute(self, populated_data_repo):
        data_object = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        # change data_name
        data_object.attrs['data_name'] = 'test_add'
        # remove has_file attribute
        del data_object.attrs['has_file']
        populated_data_repo.add(data_object, versioning_on=False)
        # get the data object that was just added
        data_object = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test_add', version_timestamp=0)
        assert data_object is not None
        has_file = data_object.attrs.get('has_file')
        assert has_file is not None
        assert has_file == True

    def test_add_record_data_object_that_has_no_has_file_attribute(self, populated_data_repo):
        data_object = populated_data_repo.get(schema_ref='animal', data_name='test', version_timestamp=0)
        # change data_name
        data_object['data_name'] = 'test_add'
        # remove has_file attribute
        del data_object['has_file']
        populated_data_repo.add(data_object, versioning_on=False)
        # get the data object that was just added
        data_object = populated_data_repo.get(schema_ref='animal', data_name='test_add', version_timestamp=0)
        assert data_object is not None
        has_file = data_object.get('has_file')
        assert has_file is not None
        assert has_file == False


    def test_add_unversioned_record_that_is_invalid(self, populated_data_repo):
        session_record = populated_data_repo.get(schema_ref='session', data_name='test', version_timestamp=0)
        session_record['data_name'] = 'test_add_bad'
        session_record['animal_data_ref'] = 5
        with pytest.raises(DataRepositoryValidationError):
            populated_data_repo.add(session_record, versioning_on=False)
            assert False, f"Should have raised a DataRepositoryValidationError for session_record: {session_record}"

    def test_add_unversioned_dataarray_that_is_invalid(self, populated_data_repo):
        waveform_data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        waveform_data_array.attrs['data_name'] = 'test_add_bad'
        waveform_data_array.attrs['session_data_ref'] = 5
        with pytest.raises(DataRepositoryValidationError):
            populated_data_repo.add(waveform_data_array, versioning_on=False)
            assert False, f"Should have raised a DataRepositoryValidationError for waveform_data_array: {waveform_data_array}"

    def test_add_versioned_data_object_that_is_invalid(self, populated_data_repo):
        data_object = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_object.attrs['data_name'] = 'test_add_bad_versioned'
        data_object.attrs['data'] = 5
        with pytest.raises(DataRepositoryValidationError):
            populated_data_repo.add(data_object, versioning_on=True)
            assert False, f"Should have raised a DataRepositoryValidationError for data_object: {data_object}"

    @pytest.mark.parametrize("bad_data_object", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_add_data_object_with_bad_data_object(self, populated_data_repo, bad_data_object):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.add(bad_data_object, versioning_on=False)
            assert False, f"Should have raised a TypeError for data_object: {bad_data_object}"

    def test_add_xarray_with_false_has_file(self, populated_data_repo):
        data_object = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        # check that the data object is an xarray DataArray
        assert isinstance(data_object, xr.DataArray)
        # change data_name to new_test
        data_object.attrs['data_name'] = 'new_test'
        # change has_file to False
        data_object.attrs['has_file'] = False
        with pytest.raises(DataRepositoryValidationError):
            populated_data_repo.add(data_object, versioning_on=False)
            assert False, f"Should have raised a DataRepositoryValidationError for data_object: {data_object}"

    def test_add_dict_with_true_has_file(self, populated_data_repo):
        data_object = populated_data_repo.get(schema_ref='animal', data_name='test', version_timestamp=0)
        # check that the data object is a dictionary
        assert isinstance(data_object, dict)
        # change data_name to new_test
        data_object['data_name'] = 'new_test'
        # change has_file to True
        data_object['has_file'] = True
        with pytest.raises(DataRepositoryValidationError):
            populated_data_repo.add(data_object, versioning_on=False)
            assert False, f"Should have raised a DataRepositoryValidationError for data_object: {data_object}"


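    # Sketch for the planned Test 3.5 above (bad data_adapter argument), which has no
    # implementation in this revision. It assumes add() accepts a data_adapter keyword the
    # way get() and remove() do, and that a non-adapter value is rejected with
    # DataRepositoryTypeError; both points are assumptions to adjust against the real signature.
    @pytest.mark.parametrize("bad_data_adapter", [1, 1.0, "not_an_adapter", [1,2,3]])
    def test_add_data_object_with_bad_data_adapter(self, populated_data_repo, bad_data_adapter):
        data_object = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_object.attrs['data_name'] = 'test_add_bad_adapter'
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.add(data_object, versioning_on=False, data_adapter=bad_data_adapter)
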
    # remove tests (test all expected behaviors of remove())
    # -----------------------------------------------------
    # Category 1: remove a data object that exists
    # Test 1.1: remove an unversioned record data object (has_file=False) that exists
    # Test 1.2: remove an unversioned data array object that exists (has_file=True)
    # Test 1.3: remove a versioned data array object that exists
    # Category 2: remove a data object that does not exist
    # Test 2.1: remove a record that does not exist; check that it raises an error
    # Test 2.2: remove a data array that does not exist (with no record); check that it raises an error
    # Category 3: bad arguments
    # Test 3.1: remove a data object with a bad schema_ref argument (error)
    # Test 3.2: remove a data object with a bad data_name argument (error)
    # Test 3.3: remove a data object with a bad version_timestamp argument (error)

    def test_remove_unversioned_record_that_exists(self, populated_data_repo):
        populated_data_repo.remove(schema_ref='animal', data_name='test', version_timestamp=0)

    def test_remove_unversioned_data_array_that_exists(self, populated_data_repo):
        populated_data_repo.remove(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)

    @pytest.mark.parametrize("delta", [s for s in range(1, 11)])
    def test_remove_versioned_data_object_that_exists(self, populated_data_repo, timestamp, model_numpy_adapter, delta):
        ts = timestamp + timedelta(seconds=delta)
        populated_data_repo.remove(
            schema_ref='numpy_test',
            data_name='numpy_test',
            version_timestamp=ts,
            data_adapter=model_numpy_adapter
        )

    def test_remove_record_that_does_not_exist(self, populated_data_repo):
        with pytest.raises(DataRepositoryNotFoundError):
            populated_data_repo.remove(schema_ref='animal', data_name='does_not_exist', version_timestamp=0)
            assert False, f"Should have raised a DataRepositoryError for schema_ref: animal and data_name: does_not_exist"

    def test_remove_data_array_that_does_not_exist(self, populated_data_repo):
        with pytest.raises(DataRepositoryNotFoundError):
            populated_data_repo.remove(schema_ref='spike_waveforms', data_name='does_not_exist', version_timestamp=0)
            assert False, f"Should have raised a DataRepositoryError for schema_ref: spike_waveforms and data_name: does_not_exist"

    @pytest.mark.parametrize("bad_schema_ref", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_remove_data_object_with_bad_schema_ref(self, populated_data_repo, bad_schema_ref):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.remove(schema_ref=bad_schema_ref, data_name='does_not_exist', version_timestamp=0)
            assert False, f"Should have raised a TypeError for schema_ref: {bad_schema_ref}"

    @pytest.mark.parametrize("bad_data_name", [None, 1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_remove_data_object_with_bad_data_name(self, populated_data_repo, bad_data_name):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.remove(schema_ref='does_not_exist', data_name=bad_data_name, version_timestamp=0)
            assert False, f"Should have raised a TypeError for data_name: {bad_data_name}"

    @pytest.mark.parametrize("bad_version_timestamp", [None, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_remove_data_object_with_bad_version_timestamp(self, populated_data_repo, bad_version_timestamp):
        with pytest.raises(Exception):
            populated_data_repo.remove(schema_ref='does_not_exist', data_name='does_not_exist', version_timestamp=bad_version_timestamp)
            assert False, f"Should have raised an Exception for version_timestamp: {bad_version_timestamp}"

    # test undo (test all expected behaviors of undo())
    # ------------------------------------------------
    # Category 1: undo adding
    # Test 1.1: undo adding an unversioned record data object (has_file=False)
    # Test 1.2: undo adding an unversioned data array object that exists (has_file=True)
    # Test 1.3: undo adding a versioned data array object that exists
    # Category 2: undo removing
    # Test 2.1: undo removing an unversioned record data object (has_file=False)
    # Test 2.2: undo removing an unversioned data array object that exists (has_file=True)
    # Test 2.3: undo removing a versioned data array object that exists
    # Category 3: undoing a change that does not exist
    # Test 3.1: undoing a change that does not exist
    # Category 4: bad arguments

    def test_undo_adding_unversioned_record_that_exists(self, populated_data_repo):
        # add a record
        record = populated_data_repo.get(schema_ref='animal', data_name='test', version_timestamp=0)
        record['data_name'] = 'test_undo_adding'
        populated_data_repo.add(record, versioning_on=False)
        # check that the record exists
        assert populated_data_repo.exists(schema_ref='animal', data_name='test_undo_adding')
        # undo the add
        populated_data_repo.undo()
        # confirm that the record does not exist
        assert not populated_data_repo.exists(schema_ref='animal', data_name='test_undo_adding')

    def test_undo_adding_unversioned_data_array_that_exists(self, populated_data_repo):
        # add a data array
        data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_array.attrs['data_name'] = 'test_undo_adding'
        populated_data_repo.add(data_array, versioning_on=False)
        # check that the data array exists
        assert populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test_undo_adding')
        # undo the add
        populated_data_repo.undo()
        # confirm that the data array does not exist
        assert not populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test_undo_adding')

    def test_undo_adding_versioned_data_object_that_exists(self, populated_data_repo):
        # add a data array
        data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_array.attrs['data_name'] = 'test_undo_versioned_adding'
        populated_data_repo.add(data_array, versioning_on=True)
        # find the data array and get its timestamp
        data_array_record = populated_data_repo.find(filter={'schema_ref': 'spike_waveforms', 'data_name': 'test_undo_versioned_adding'})[0]
        vts = data_array_record['version_timestamp']
        assert populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test_undo_versioned_adding', version_timestamp=vts), "Assertion 1: The data array should exist."
        # undo the add
        populated_data_repo.undo()
        # confirm that the data array does not exist
        assert not populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test_undo_versioned_adding', version_timestamp=vts), "Assertion 2: The data array should not exist."

    def test_undo_removing_unversioned_record_that_exists(self, populated_data_repo):
        # remove a record
        populated_data_repo.remove(schema_ref='animal', data_name='test', version_timestamp=0)
        # check that the record does not exist
        assert not populated_data_repo.exists(schema_ref='animal', data_name='test')
        # undo the remove
        populated_data_repo.undo()
        # confirm that the record exists
        assert populated_data_repo.exists(schema_ref='animal', data_name='test')

    def test_undo_removing_unversioned_data_array_that_exists(self, populated_data_repo):
        # remove a data array
        populated_data_repo.remove(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        # check that the data array does not exist
        assert not populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test')
        # undo the remove
        populated_data_repo.undo()
        # confirm that the data array exists
        assert populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test')

    @pytest.mark.skip(reason="Pending fix")
    def test_undo_removing_versioned_data_object_that_exists(self, populated_data_repo):
        # add a versioned data array that will then be removed
        data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_array.attrs['data_name'] = 'test_undo_versioned_removing'
        populated_data_repo.add(data_array, versioning_on=True)
        # find the data array and get its timestamp
        data_array_record = populated_data_repo.find(filter={'schema_ref': 'spike_waveforms', 'data_name': 'test_undo_versioned_removing'})[0]
        vts = data_array_record['version_timestamp']
        # check that the data array exists
        assert populated_data_repo.get(schema_ref='spike_waveforms', data_name='test_undo_versioned_removing', version_timestamp=vts) is not None
        # remove the data array
        populated_data_repo.remove(schema_ref='spike_waveforms', data_name='test_undo_versioned_removing', version_timestamp=vts)
        # check that the data array does not exist
        assert populated_data_repo.get(schema_ref='spike_waveforms', data_name='test_undo_versioned_removing', version_timestamp=vts) is None
        # undo the remove
        populated_data_repo.undo()
        # confirm that the data array exists
        assert populated_data_repo.get(schema_ref='spike_waveforms', data_name='test_undo_versioned_removing', version_timestamp=vts) is not None

    def test_undoing_a_change_that_does_not_exist(self, populated_data_repo):
        ohe = populated_data_repo.undo()
        assert ohe is None, f"Should have returned None for undoing a change that does not exist, but returned: {ohe}"

    # test undo_all (test all expected behaviors of undo_all())
    # --------------------------------------------------------
    # Category 1: undo all changes
    # Test 1.1: undo all changes
    # Test 1.2: undo all changes when there are no changes to undo

    def test_undo_all_changes(self, populated_data_repo):
        # add a record
        record = populated_data_repo.get(schema_ref='animal', data_name='test', version_timestamp=0)
        record['data_name'] = 'test_undo_all'
        populated_data_repo.add(record, versioning_on=False)
        # add a data array
        data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        data_array.attrs['data_name'] = 'test_undo_all'
        populated_data_repo.add(data_array, versioning_on=False)
        # add a versioned data array
        data_array = populated_data_repo.get(schema_ref='spike_waveforms', data_name='test', version_timestamp=0)
        populated_data_repo.add(data_array, versioning_on=True)
        # find the data array and get its timestamp
        data_array_record = populated_data_repo.find(filter={'schema_ref': 'spike_waveforms', 'data_name': 'test'})[0]
        vts = data_array_record['version_timestamp']
        # check that the data array exists
        assert populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test', version_timestamp=vts)
        # undo all changes
        undone_operations = populated_data_repo.undo_all()
        # confirm that the number of undone operations is correct
        assert len(undone_operations) == 3
        # confirm that the record does not exist
        assert not populated_data_repo.exists(schema_ref='animal', data_name='test_undo_all')
        # confirm that the data array does not exist
        assert not populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test_undo_all')
        # confirm that the versioned data array does not exist
        assert not populated_data_repo.exists(schema_ref='spike_waveforms', data_name='test', version_timestamp=vts)

    def test_undo_all_changes_when_there_are_no_changes_to_undo(self, populated_data_repo):
        undone_operations = populated_data_repo.undo_all()
        assert len(undone_operations) == 0, f"Should have returned an empty list, but returned: {undone_operations}"

    # test list_marked_for_deletion (test all expected behaviors of list_marked_for_deletion())
    # ------------------------------------------------------------------------------------------
    # Category 1: list data objects that are marked for deletion
    # Test 1.1: list all data objects that are marked for deletion using time_threshold of None
    # Test 1.2: list data objects that are marked for deletion after a time using a time_threshold
    # Test 1.3: list all data objects that are marked for deletion using time_threshold of None when there are no data objects marked for deletion (empty list)
    # Category 2: bad arguments
    # Test 2.1: time_threshold is not a datetime object (several tests / params) (error)

    def test_list_marked_for_deletion_all(self, populated_data_repo, model_numpy_adapter, timestamp):
        # mark records for deletion
        for i in range(1, 11):
            ts = timestamp + timedelta(seconds=i)
            margin = timedelta(milliseconds=1)
            lower = ts - margin
            upper = ts + margin
            records = populated_data_repo.find(filter={'schema_ref': 'numpy_test', 'data_name': 'numpy_test'})
            assert any([lower <= r['version_timestamp'] <= upper for r in records]), f"Should have found a record with version_timestamp: {[ts]}, but found only {[r['version_timestamp'] for r in records]}"
            ohe = populated_data_repo.remove(
                schema_ref='numpy_test',
                data_name='numpy_test',
                version_timestamp=ts,
                data_adapter=model_numpy_adapter
            )
            assert ohe is not None, "Should have returned an OperationHistoryEntry"
            assert ohe.has_file == True, "Should have returned an OperationHistoryEntry with has_file: True"
        marked_for_deletion = populated_data_repo.list_marked_for_deletion(time_threshold=None)
        assert len(marked_for_deletion) == 10, f"Should have returned 10 data objects marked for deletion, but returned {len(marked_for_deletion)}"

    @pytest.mark.skip(reason="Need to fix the test")
    @pytest.mark.parametrize("threshold_delta", range(0, 10))
    def test_list_marked_for_deletion_after_time(self, populated_data_repo, model_numpy_adapter, timestamp, threshold_delta):
        # mark records for deletion
        for i in range(1, 11):
            ts = timestamp + timedelta(seconds=i)
            ts = ts.replace(microsecond=0)  # datetime.replace returns a new object, so assign it back
            populated_data_repo.remove(
                schema_ref='numpy_test',
                data_name='numpy_test',
                version_timestamp=ts,
                data_adapter=model_numpy_adapter
            )
        # get the time_threshold
        time_threshold = timestamp + timedelta(seconds=threshold_delta)
        marked_for_deletion = populated_data_repo.list_marked_for_deletion(time_threshold=time_threshold)
        assert len(marked_for_deletion) == 10 - threshold_delta, f"Should have returned {10 - threshold_delta} data objects marked for deletion, but returned {len(marked_for_deletion)}"

    def test_list_marked_for_deletion_all_when_there_are_no_data_objects_marked_for_deletion(self, populated_data_repo):
        marked_for_deletion = populated_data_repo.list_marked_for_deletion(time_threshold=None)
        assert len(marked_for_deletion) == 0, f"Should have returned an empty list, but returned {marked_for_deletion}"

    @pytest.mark.parametrize("bad_time_threshold", [1, 1.0, [1,2,3], {"x",1,2}, ("a", "b", "c")])
    def test_list_marked_for_deletion_with_bad_time_threshold(self, populated_data_repo, bad_time_threshold):
        with pytest.raises(DataRepositoryTypeError):
            populated_data_repo.list_marked_for_deletion(time_threshold=bad_time_threshold)
            assert False, f"Should have raised an Exception for time_threshold: {bad_time_threshold}"

    # test purge (test all expected behaviors of purge()) # purges marked for deletion data objects
    # -----------------------------------------------------------------------------------------------
    # Category 1: purge data objects that are marked for deletion
    # Test 1.1: purge all data objects that are marked for deletion using time_threshold of None
    # Test 1.2: purge data objects that are marked for deletion after a time using a time_threshold
    # Test 1.3: purge all data objects that are marked for deletion using time_threshold of None when there are no data objects marked for deletion (empty list)
    # Category 2: bad arguments
    # Test 2.1: time_threshold is not a datetime object (several tests / params) (error)

    def test_purge_all(self, populated_data_repo, model_numpy_adapter, timestamp):
        # mark records for deletion
        for i in range(1, 11):
            ts = timestamp + timedelta(seconds=i)
            populated_data_repo.remove(
                schema_ref='numpy_test',
                data_name='numpy_test',
                version_timestamp=ts,
                data_adapter=model_numpy_adapter
            )
        # purge the data objects
        purged = populated_data_repo.purge(time_threshold=None)
        assert len(purged) == 10, f"Should have purged 10 data objects, but purged {len(purged)}"