bigraph_schema-0.0.71-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of bigraph-schema might be problematic.
- bigraph_schema/__init__.py +8 -0
- bigraph_schema/edge.py +129 -0
- bigraph_schema/parse.py +165 -0
- bigraph_schema/protocols.py +35 -0
- bigraph_schema/registry.py +287 -0
- bigraph_schema/tests.py +2631 -0
- bigraph_schema/type_functions.py +3379 -0
- bigraph_schema/type_system.py +1798 -0
- bigraph_schema/type_system_adjunct.py +502 -0
- bigraph_schema/units.py +133 -0
- bigraph_schema/utilities.py +243 -0
- bigraph_schema-0.0.71.dist-info/METADATA +52 -0
- bigraph_schema-0.0.71.dist-info/RECORD +17 -0
- bigraph_schema-0.0.71.dist-info/WHEEL +5 -0
- bigraph_schema-0.0.71.dist-info/licenses/AUTHORS.md +6 -0
- bigraph_schema-0.0.71.dist-info/licenses/LICENSE +201 -0
- bigraph_schema-0.0.71.dist-info/top_level.txt +1 -0
@@ -0,0 +1,3379 @@
"""
==============
Type Functions
==============

This module includes various type functions that are essential for handling and
manipulating different types of schemas and states. These functions are
categorized based on their functionality and the type of schema they operate
on. Below is an overview of the type functions included in this module:

1. **Apply Functions**:
   - Responsible for applying updates to various types of schemas.
   - Each function handles a specific type of schema and ensures that updates
     are applied correctly.

2. **Check Functions**:
   - Responsible for validating the state against various types of schemas.
   - Each function ensures that the state conforms to the expected schema type.

3. **Fold Functions**:
   - Responsible for folding the state based on the schema and a given method.
   - Each function handles a specific type of schema and ensures that the
     folding is done correctly.

4. **Divide Functions**:
   - Responsible for dividing the state into a number of parts based on the
     schema.
   - Each function handles a specific type of schema and divides the state
     accordingly.

5. **Serialize Functions**:
   - Responsible for converting the state into a serializable format based on
     the schema.
   - Each function handles a specific type of schema and ensures that the state
     is serialized correctly.

6. **Deserialize Functions**:
   - Responsible for converting serialized data back into the state based on
     the schema.
   - Each function handles a specific type of schema and ensures that the data
     is deserialized correctly.

7. **Slice Functions**:
   - Responsible for extracting a part of the state based on the schema and
     path.
   - Each function handles a specific type of schema and ensures that the
     correct part of the state is sliced.

8. **Bind Functions**:
   - Responsible for binding a key and its corresponding schema and state to
     the main schema and state.
   - Each function handles a specific type of schema and ensures that the
     binding is done correctly.

9. **Resolve Functions**:
   - Responsible for resolving updates to the schema.
   - Each function handles a specific type of schema and ensures that updates
     are resolved correctly.

10. **Dataclass Functions**:
    - Responsible for generating dataclass representations of various types of
      schemas.
    - Each function handles a specific type of schema and ensures that the
      dataclass is generated correctly.

11. **Default Functions**:
    - Responsible for providing default values for various types of schemas.
    - Each function handles a specific type of schema and ensures that the
      default value is generated correctly.

12. **Generate Functions**:
    - Responsible for generating schemas and states based on the provided
      schema and state.
    - Each function handles a specific type of schema and ensures that the
      generation is done correctly.

13. **Sort Functions**:
    - Responsible for sorting schemas and states.
    - Each function handles a specific type of schema and ensures that the
      sorting is done correctly.

14. **Reaction Functions**:
    - Responsible for handling reactions within the schema and state.
    - Each function processes a specific type of reaction and ensures that the
      state is updated accordingly.
"""


import sys
import types
import copy
import numbers
import numpy as np
from abc import ABCMeta
from pint import Quantity
from pprint import pformat as pf

import typing
from typing import NewType, Union, Mapping, List, Dict, Optional, Callable
from dataclasses import field, make_dataclass

from bigraph_schema import get_path, set_path
from bigraph_schema.protocols import local_lookup_module
from bigraph_schema.units import units, render_units_type
from bigraph_schema.registry import (
    is_schema_key, non_schema_keys, type_parameter_key, deep_merge, hierarchy_depth, establish_path
)
from bigraph_schema.utilities import (
    is_empty, union_keys, tuple_from_type, array_shape, read_datatype, read_shape, remove_path,
    type_parameters_for, visit_method, NONE_SYMBOL
)


# Create a new module dynamically for the dataclasses
module_name = 'bigraph_schema.data'
if module_name not in sys.modules:
    data_module = types.ModuleType(module_name)
    sys.modules[module_name] = data_module
else:
    data_module = sys.modules[module_name]


# =========================
# Apply Functions Overview
# =========================
# These functions are responsible for applying updates to various types of
# schemas. Each function handles a specific type of schema and ensures that
# updates are applied correctly.
#
# Function signature: (schema, current, update, core)


def apply_any(schema, current, update, top_schema, top_state, path, core):
    if isinstance(current, dict):
        return apply_tree(
            current,
            update,
            'tree[any]',
            top_schema=top_schema,
            top_state=top_state,
            path=path,
            core=core)
    else:
        return update


def apply_tuple(schema, current, update, top_schema, top_state, path, core):
    parameters = core._parameters_for(schema)
    result = []

    for parameter, current_value, update_value in zip(parameters, current, update):
        element = core.apply_update(
            parameter,
            current_value,
            update_value,
            top_schema=top_schema,
            top_state=top_state,
            path=path)

        result.append(element)

    return tuple(result)


def apply_union(schema, current, update, top_schema, top_state, path, core):
    current_type = find_union_type(
        core,
        schema,
        current)

    update_type = find_union_type(
        core,
        schema,
        update)

    if current_type is None:
        raise Exception('\n '.join([
            'trying to apply update to union value but cannot find type of'
            'value in the union',
            f'value: {current}',
            f'update: {update}',
            f'union: {list(bindings.values())}']))
    elif update_type is None:
        raise Exception('\n '.join([
            'trying to apply update to union value but cannot find type of'
            'update in the union',
            f'value: {current}',
            f'update: {update}',
            f'union: {list(bindings.values())}']))

    # TODO: throw an exception if current_type is incompatible with update_type

    return core.apply_update(
        update_type,
        current,
        update,
        top_schema=top_schema,
        top_state=top_state,
        path=path)


def set_apply(schema, current, update, top_schema, top_state, path, core):
    if isinstance(current, dict) and isinstance(update, dict):
        for key, value in update.items():
            # TODO: replace this with type specific functions (??)
            if key in schema:
                subschema = schema[key]
            elif '_leaf' in schema:
                if core.check(schema['_leaf'], value):
                    subschema = schema['_leaf']
                else:
                    subschema = schema
            elif '_value' in schema:
                subschema = schema['_value']

            current[key] = set_apply(
                subschema,
                current.get(key),
                value,
                top_schema,
                top_state,
                path,
                core)

        return current
    else:
        return update


def accumulate(schema, current, update, top_schema, top_state, path, core):
    if current is None:
        return update
    if update is None:
        return current
    else:
        return current + update


def concatenate(schema, current, update, top_schema, top_state, path, core=None):
    return current + update


def replace(schema, current, update, top_schema, top_state, path, core=None):
    return update


def apply_schema(schema, current, update, top_schema, top_state, path, core):
    """
    Apply an update to a schema, returning the new schema
    """
    outcome = core.resolve_schemas(current, update)
    return outcome


def apply_tree(schema, current, update, top_schema, top_state, path, core):
    leaf_type = core._find_parameter(
        schema,
        'leaf')

    if current is None:
        current = core.default(leaf_type)

    if isinstance(current, dict) and isinstance(update, dict):
        for key, branch in update.items():
            if key == '_add':
                current.update(branch)
            elif key == '_remove':
                for removed_path in branch:
                    if isinstance(removed_path, str):
                        removed_path = [removed_path]
                    current = remove_path(current, removed_path)
            elif isinstance(branch, dict):
                subschema = schema
                if key in schema:
                    subschema = schema[key]

                current[key] = core.apply_update(
                    subschema,
                    current.get(key),
                    branch,
                    top_schema=top_schema,
                    top_state=top_state,
                    path=path + [key])

            elif core.check(leaf_type, branch):
                current[key] = core.apply_update(
                    leaf_type,
                    current.get(key),
                    branch,
                    top_schema=top_schema,
                    top_state=top_state,
                    path=path + [key])

            else:
                raise Exception('\n '.join([
                    'state does not seem to be of leaf type:',
                    f'state: {state}',
                    f'leaf type: {leaf_type}']))

        return current

    elif core.check(leaf_type, current):
        return core.apply_update(
            leaf_type,
            current,
            update,
            top_schema=top_schema,
            top_state=top_state,
            path=path)

    else:
        raise Exception('\n '.join([
            'trying to apply an update to a tree but the values are not'
            'trees or leaves of that tree\ncurrent:',
            f'{pf(current)}\nupdate:',
            f'{pf(update)}\nschema:',
            f'{pf(schema)}']))


def apply_boolean(schema, current: bool, update: bool, top_schema, top_state, path, core=None) -> bool:
    """
    Performs a bit flip if `current` does not match `update`, returning update.
    Returns current if they match.
    """
    if current != update:
        return update
    else:
        return current


def apply_list(schema, current, update, top_schema, top_state, path, core):
    element_type = core._find_parameter(
        schema,
        'element')

    if current is None:
        current = []

    if core.check(element_type, update):
        result = current + [update]
        return result

    elif isinstance(update, list):
        result = current + update
        # for current_element, update_element in zip(current, update):
        #     applied = core.apply(
        #         element_type,
        #         current_element,
        #         update_element)
        #     result.append(applied)

        return result
    else:
        raise Exception('\n '.join([
            'trying to apply an update to an existing list, but the update'
            'is not a list or of element type:',
            f'update: {update}',
            f'element type: {pf(element_type)}']))


def apply_map(schema, current, update, top_schema, top_state, path, core=None):
    if update is None:
        return current

    if not isinstance(current, dict):
        raise Exception('\n '.join([
            'trying to apply an update to a value that is not a map:',
            f'value: {current}',
            f'update: {update}']))
    if not isinstance(update, dict):
        raise Exception('\n '.join([
            'trying to apply an update that is not a map:',
            f'value: {current}',
            f'update: {update}']))

    value_type = core._find_parameter(
        schema,
        'value')

    result = current.copy()

    for key, update_value in update.items():
        if key == '_add':
            for addition_key, addition in update_value.items():

                _, generated_state, top_schema, top_state = core._generate_recur(
                    value_type,
                    addition,
                    top_schema=top_schema,
                    top_state=top_state,
                    path=path + [addition_key])

                result[addition_key] = generated_state

        elif key == '_remove':
            for remove_key in update_value:
                if remove_key in result:
                    del result[remove_key]

        elif key not in current:
            # # This supports adding without the '_add' key, if the key is not in
            # the state
            # _, generated_state, top_schema, top_state = core._generate_recur(
            #     value_type,
            #     update_value,
            #     top_schema=top_schema,
            #     top_state=top_state,
            #     path=path + [key])

            # result[key] = generated_state

            # # Or raise an exception
            # raise Exception(f'trying to update a key that does not exist:\n value: {current}\n update: {update}')

            pass
        else:
            result[key] = core.apply_update(
                value_type,
                result[key],
                update_value,
                top_schema=top_schema,
                top_state=top_state,
                path=path + [key])

    return result


def apply_maybe(schema, current, update, top_schema, top_state, path, core):
    if current is None or update is None:
        return update
    else:
        value_type = core._find_parameter(
            schema,
            'value')

        return core.apply_update(
            value_type,
            current,
            update,
            top_schema=top_schema,
            top_state=top_state,
            path=path)


def apply_mark(schema, current, update, top_schema, top_state, path, core):
    return update


def check_mark(schema, state, core):
    return isinstance(state, (int, str, type(None)))


def deserialize_mark(schema, value, core):
    try:
        return int(value)
    except Exception as e:
        return value


def resolve_mark(schema, other, core):
    return schema


def apply_path(schema, current, update, top_schema, top_state, path, core):
    # paths replace previous paths
    return update


def apply_edge(schema, current, update, top_schema, top_state, path, core):
    result = current.copy()
    result['inputs'] = core.apply_update(
        'wires',
        current.get('inputs'),
        update.get('inputs'),
        top_schema=top_schema,
        top_state=top_state,
        path=path)

    result['outputs'] = core.apply_update(
        'wires',
        current.get('outputs'),
        update.get('outputs'),
        top_schema=top_schema,
        top_state=top_state,
        path=path)

    return result


# TODO: deal with all the different unit core
def apply_units(schema, current, update, top_schema, top_state, path, core):
    return current + update


def apply_enum(schema, current, update, top_schema, top_state, path, core):
    parameters = core._parameters_for(schema)
    if update in parameters:
        return update
    else:
        raise Exception(f'{update} is not in the enum, '
                        f'options are: {parameters}')


def apply_array(schema, current, update, top_schema, top_state, path, core):
    if isinstance(update, dict):
        paths = hierarchy_depth(update)
        for path, inner_update in paths.items():
            if len(path) > len(schema['_shape']):
                raise Exception('index is too large for array update: '
                                f'{path}\n {schema}')
            else:
                index = tuple(path)
                current[index] += inner_update

        return current
    else:
        return current + update


def apply_function(schema, current, update, top_schema, top_state, path, core):
    def compose(a):
        return update(
            current(
                a))

    return compose


def apply_meta(schema, current, update, top_schema, top_state, path, core):
    return update


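# Illustrative usage sketch (commentary added for clarity; not in the original
# file): apply functions are normally dispatched through a type-system core as
# core.apply_update(schema, current, update, ...), but the simple reducers can
# be exercised directly. Using only `accumulate` as defined above:
#
#     accumulate('float', 1.0, 2.5, None, None, [], None)   # -> 3.5
#     accumulate('float', None, 2.5, None, None, [], None)  # -> 2.5
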
# =========================
# Check Functions Overview
# =========================
# These functions are responsible for validating the state against various
# types of schemas.
#
# Each function ensures that the state conforms to the expected schema type.
#
# Function signature: (schema, state, core)


def check_any(schema, state, core):
    if isinstance(schema, dict):
        for key, subschema in schema.items():
            if not key.startswith('_'):
                if isinstance(state, dict):
                    if key in state:
                        check = core.check_state(
                            subschema,
                            state[key])

                        if not check:
                            return False
                    else:
                        return False
                else:
                    return False

        return True
    else:
        return True


def check_tuple(schema, state, core):
    if not isinstance(state, (tuple, list)):
        return False

    parameters = core._parameters_for(schema)
    for parameter, element in zip(parameters, state):
        if not core.check(parameter, element):
            return False

    return True


def check_union(schema, state, core):
    found = find_union_type(
        core,
        schema,
        state)

    return found is not None and len(found) > 0


def check_number(schema, state, core=None):
    return isinstance(state, numbers.Number)


def check_boolean(schema, state, core=None):
    return isinstance(state, bool)


def check_integer(schema, state, core=None):
    return isinstance(state, int) and not isinstance(state, bool)


def check_float(schema, state, core=None):
    return isinstance(state, float)


def check_string(schema, state, core=None):
    return isinstance(state, str)


class Empty():
    def method(self):
        pass


FUNCTION_TYPE = type(check_string)
METHOD_TYPE = type(Empty().method)


def check_function(schema, state, core=None):
    return isinstance(state, FUNCTION_TYPE)


def check_method(schema, state, core=None):
    return isinstance(state, METHOD_TYPE)


def check_meta(schema, state, core=None):
    return isinstance(state, ABCMeta)


def check_list(schema, state, core):
    element_type = core._find_parameter(
        schema,
        'element')

    if isinstance(state, list):
        for element in state:
            check = core.check(
                element_type,
                element)

            if not check:
                return False

        return True
    else:
        return False


def check_tree(schema, state, core):
    leaf_type = core._find_parameter(
        schema,
        'leaf')

    if isinstance(state, dict):
        for key, value in state.items():
            check = core.check({
                '_type': 'tree',
                '_leaf': leaf_type},
                value)

            if not check:
                return core.check(
                    leaf_type,
                    value)

        return True
    else:
        return core.check(leaf_type, state)


def check_map(schema, state, core=None):
    value_type = core._find_parameter(
        schema,
        'value')

    if not isinstance(state, dict):
        return False

    for key, substate in state.items():
        if not core.check(value_type, substate):
            return False

    return True


def check_ports(state, core, key):
    return key in state and core.check(
        'wires',
        state[key])


def check_edge(schema, state, core):
    return isinstance(state, dict) \
        and check_ports(state, core, 'inputs') \
        and check_ports(state, core, 'outputs')


def check_maybe(schema, state, core):
    if state is None:
        return True
    else:
        value_type = core._find_parameter(
            schema,
            'value')

        return core.check(value_type, state)


def check_array(schema, state, core):
    shape_type = core._find_parameter(
        schema,
        'shape')

    return isinstance(state, np.ndarray) \
        and state.shape == array_shape(core, shape_type)
        # and state.dtype == bindings['data']

# TODO align numpy data types so we can validate the types of the
# arrays


def check_enum(schema, state, core):
    if not isinstance(state, str):
        return False

    parameters = core._parameters_for(schema)
    return state in parameters


def check_units(schema, state, core):
    # TODO: expand this to check the actual units for compatibility
    return isinstance(state, Quantity)


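# Illustrative usage sketch (commentary added for clarity; not in the original
# file): the scalar check functions return plain booleans and do not need a
# core instance, for example:
#
#     check_integer(None, 3)       # True
#     check_integer(None, True)    # False -- bools are excluded explicitly
#     check_float(None, 3.0)       # True
#     check_string(None, 'hello')  # True
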
# =========================
# Fold Functions Overview
# =========================
# These functions are responsible for folding the state based on the schema and
# a given method.
#
# Each function handles a specific type of schema and ensures that the folding
# is done correctly.
#
# In functional programming, a fold is a higher-order function that processes a
# data structure in some order and builds a return value.
#
# Function signature: (schema, state, method, values, core)


def fold_any(schema, state, method, values, core):
    if isinstance(state, dict):
        result = {}
        for key, value in state.items():
            if key.startswith('_'):
                result[key] = value
            else:
                if key in schema:
                    fold = core.fold_state(
                        schema[key],
                        value,
                        method,
                        values)
                    result[key] = fold

    else:
        result = state

    visit = visit_method(
        schema,
        result,
        method,
        values,
        core)

    return visit


def fold_tuple(schema, state, method, values, core):
    if not isinstance(state, (tuple, list)):
        return visit_method(
            schema,
            state,
            method,
            values,
            core)
    else:
        parameters = core._parameters_for(schema)
        result = []
        for parameter, element in zip(parameters, state):
            fold = core.fold(
                parameter,
                element,
                method,
                values)
            result.append(fold)

        result = tuple(result)

        return visit_method(
            schema,
            result,
            method,
            values,
            core)


def fold_union(schema, state, method, values, core):
    union_type = find_union_type(
        core,
        schema,
        state)

    result = core.fold(
        union_type,
        state,
        method,
        values)

    return result


def fold_list(schema, state, method, values, core):
    element_type = core._find_parameter(
        schema,
        'element')

    if core.check(element_type, state):
        result = core.fold(
            element_type,
            state,
            method,
            values)

    elif isinstance(state, list):
        subresult = [
            fold_list(
                schema,
                element,
                method,
                values,
                core)
            for element in state]

        result = visit_method(
            schema,
            subresult,
            method,
            values,
            core)

    else:
        raise Exception('\n '.join([
            'state does not seem to be a list or an element:',
            f'state: {state}',
            f'schema: {schema}']))

    return result


def fold_tree(schema, state, method, values, core):
    leaf_type = core._find_parameter(
        schema,
        'leaf')

    if core.check(leaf_type, state):
        result = core.fold(
            leaf_type,
            state,
            method,
            values)

    elif isinstance(state, dict):
        subresult = {}

        for key, branch in state.items():
            if key.startswith('_'):
                subresult[key] = branch
            else:
                subresult[key] = fold_tree(
                    schema[key] if key in schema else schema,
                    branch,
                    method,
                    values,
                    core)

        result = visit_method(
            schema,
            subresult,
            method,
            values,
            core)

    else:
        raise Exception('\n '.join([
            'state does not seem to be a tree or a leaf:',
            f'state: {state}',
            f'schema: {schema}']))

    return result


def fold_map(schema, state, method, values, core):
    value_type = core._find_parameter(
        schema,
        'value')

    subresult = {}

    for key, value in state.items():
        subresult[key] = core.fold(
            value_type,
            value,
            method,
            values)

    result = visit_method(
        schema,
        subresult,
        method,
        values,
        core)

    return result


def fold_maybe(schema, state, method, values, core):
    value_type = core._find_parameter(
        schema,
        'value')

    if state is None:
        result = core.fold(
            'any',
            state,
            method,
            values)

    else:
        result = core.fold(
            value_type,
            state,
            method,
            values)

    return result


def fold_enum(schema, state, method, values, core):
    if not isinstance(state, (tuple, list)):
        return visit_method(
            schema,
            state,
            method,
            values,
            core)
    else:
        parameters = core._parameters_for(schema)
        result = []
        for parameter, element in zip(parameters, state):
            fold = core.fold(
                parameter,
                element,
                method,
                values)
            result.append(fold)

        result = tuple(result)

        return visit_method(
            schema,
            result,
            method,
            values,
            core)


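# Illustrative usage sketch (commentary added for clarity; not in the original
# file): folds are normally invoked through the core, which resolves the
# schema's parameters and calls visit_method at each node. A hypothetical call
# mirroring the usage in divide_reaction below might look like:
#
#     core.fold(mother_schema, mother_state, 'divide', {'divisions': 2})
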
# ==========================
# Divide Functions Overview
# ==========================
# These functions are responsible for dividing the state into a number of parts
# based on the schema.
#
# Each function handles a specific type of schema and divides the state
# accordingly.
#
# Function signature: (schema, state, values, core)


def divide_any(schema, state, values, core):
    divisions = values.get('divisions', 2)

    if isinstance(state, dict):
        result = [
            {}
            for _ in range(divisions)]

        for key, value in state.items():
            for index in range(divisions):
                result[index][key] = value[index]

        return result

    else:
        # TODO: division operates on and returns dictionaries
        # return {
        #     id: copy.deepcopy(state),
        #     for generate_new_id(existing_id, division) in range(divisions)}
        # ?????

        return [
            copy.deepcopy(state)
            for _ in range(divisions)]


def divide_tuple(schema, state, values, core):
    divisions = values.get('divisions', 2)

    return [
        tuple([item[index] for item in state])
        for index in range(divisions)]


def divide_float(schema, state, values, core):
    divisions = values.get('divisions', 2)
    portion = float(state) / divisions
    return [
        portion
        for _ in range(divisions)]


# support function core for registries?
def divide_integer(schema, value, values, core):
    half = value // 2
    other_half = half
    if value % 2 == 1:
        other_half += 1
    return [half, other_half]


def divide_longest(schema, dimensions, values, core):
    # any way to declare the required keys for this function in the registry?
    # find a way to ask a function what type its domain and codomain are

    width = dimensions['width']
    height = dimensions['height']

    if width > height:
        a, b = divide_integer(width)
        return [{'width': a, 'height': height}, {'width': b, 'height': height}]
    else:
        x, y = divide_integer(height)
        return [{'width': width, 'height': x}, {'width': width, 'height': y}]


def divide_reaction(schema, state, reaction, core):
    mother = reaction['mother']
    daughters = reaction['daughters']

    mother_schema, mother_state = core.slice(
        schema,
        state,
        mother)

    division = core.fold(
        mother_schema,
        mother_state,
        'divide', {
            'divisions': len(daughters),
            'daughter_configs': [daughter[1] for daughter in daughters]})

    after = {
        daughter[0]: daughter_state
        for daughter, daughter_state in zip(daughters, division)}

    replace = {
        'before': {
            mother: {}},
        'after': after}

    return replace_reaction(
        schema,
        state,
        replace,
        core)


def divide_list(schema, state, values, core):
    element_type = core._find_parameter(
        schema,
        'element')

    if core.check(element_type, state):
        return core.fold(
            element_type,
            state,
            'divide',
            values)

    elif isinstance(state, list):
        divisions = values.get('divisions', 2)
        result = [[] for _ in range(divisions)]

        for elements in state:
            for index in range(divisions):
                result[index].append(
                    elements[index])

        return result

    else:
        raise Exception('\n '.join([
            'trying to divide list but state does not resemble a list or an'
            'element.',
            f'state: {pf(state)}',
            f'schema: {pf(schema)}']))


def divide_tree(schema, state, values, core):
    leaf_type = core._find_parameter(
        schema,
        'leaf')

    if core.check(leaf_type, state):
        return core.fold(
            leaf_type,
            state,
            'divide',
            values)

    elif isinstance(state, dict):
        divisions = values.get('divisions', 2)
        division = [{} for _ in range(divisions)]

        for key, value in state.items():
            for index in range(divisions):
                division[index][key] = value[index]

        return division

    else:
        raise Exception('\n '.join([
            'trying to divide tree but state does not resemble a leaf or a'
            'tree.',
            f'state: {pf(state)}',
            f'schema: {pf(schema)}']))


def divide_map(schema, state, values, core):
    if isinstance(state, dict):
        divisions = values.get('divisions', 2)
        division = [{} for _ in range(divisions)]
        for key, value in state.items():
            for index in range(divisions):
                division[index][key] = value[index]

        return division
    else:
        raise Exception('\n '.join([
            'trying to divide a map but state is not a dict.',
            f'state: {pf(state)}',
            f'schema: {pf(schema)}']))


def divide_enum(schema, state, values, core):
    divisions = values.get('divisions', 2)

    return [
        tuple([item[index] for item in state])
        for index in range(divisions)]


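# Illustrative usage sketch (commentary added for clarity; not in the original
# file): the scalar dividers above can be called directly; the schema and core
# arguments are unused by these two:
#
#     divide_integer(None, 5, {}, None)                # -> [2, 3]
#     divide_float(None, 5.0, {'divisions': 2}, None)  # -> [2.5, 2.5]
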
# =============================
# Serialize Functions Overview
# =============================
# These functions are responsible for converting the state into a serializable
# format based on the schema.
#
# Each function handles a specific type of schema and ensures that the state is
# serialized correctly.
#
# Function signature: (schema, state, core)


def serialize_any(schema, state, core):
    if isinstance(state, dict):
        tree = {}

        for key in non_schema_keys(schema):
            encoded = core.serialize(
                schema.get(key, schema),
                state.get(key))
            tree[key] = encoded

        return tree

    else:
        return str(state)


def serialize_tuple(schema, value, core):
    parameters = core._parameters_for(schema)
    result = []

    for parameter, element in zip(parameters, value):
        encoded = core.serialize(
            parameter,
            element)

        result.append(encoded)

    return tuple(result)


def serialize_union(schema, value, core):
    union_type = find_union_type(
        core,
        schema,
        value)

    return core.serialize(
        union_type,
        value)


def serialize_string(schema, value, core=None):
    return value


def serialize_boolean(schema, value: bool, core) -> str:
    return str(value)


def serialize_list(schema, value, core=None):
    element_type = core._find_parameter(
        schema,
        'element')

    return [
        core.serialize(
            element_type,
            element)
        for element in value]


def serialize_tree(schema, value, core):
    if isinstance(value, dict):
        encoded = {}
        for key, subvalue in value.items():
            encoded[key] = serialize_tree(
                schema,
                subvalue,
                core)

    else:
        leaf_type = core._find_parameter(
            schema,
            'leaf')

        if core.check(leaf_type, value):
            encoded = core.serialize(
                leaf_type,
                value)
        else:
            raise Exception('\n '.join([
                'trying to serialize a tree but unfamiliar with this form of'
                f'tree: {value} - current schema:',
                f'{pf(schema)}']))

    return encoded


def serialize_units(schema, value, core):
    return str(value)


def serialize_maybe(schema, value, core):
    if value is None:
        return NONE_SYMBOL
    else:
        value_type = core._find_parameter(
            schema,
            'value')

        return core.serialize(
            value_type,
            value)


def serialize_function(schema, value, core=None):
    return f'{value.__module__}.{value.__name__}'


def serialize_map(schema, value, core=None):
    value_type = core._find_parameter(
        schema,
        'value')

    return {
        key: core.serialize(
            value_type,
            subvalue) if not is_schema_key(key) else subvalue
        for key, subvalue in value.items()}


def serialize_edge(schema, value, core):
    return value


def serialize_enum(schema, value, core):
    return value


def recur_serialize_schema(schema, core, path=None, parents=None):
    """ Serialize schema to a string """
    path = path or []
    parents = parents or []
    schema_id = id(schema)

    if schema_id in parents:
        index = parents.index(schema_id)
        reference = path[:index]
        output = '/'.join(reference)
        return f'/{output}'

    if isinstance(schema, str):
        return schema

    elif isinstance(schema, tuple):
        inner = [
            recur_serialize_schema(
                schema=element,
                core=core,
                path=path+[index],
                parents=parents+[schema_id])
            for index, element in enumerate(schema)]

        return inner

    elif isinstance(schema, dict):
        inner = {}
        for key in schema:
            subschema = recur_serialize_schema(
                schema=schema[key],
                core=core,
                path=path+[key],
                parents=parents+[schema_id])
            inner[key] = subschema

        return inner

    else:
        return schema


def serialize_schema(schema, state, core):
    """ Serialize schema to a string """
    return recur_serialize_schema(schema=state, core=core)


def serialize_array(schema, value, core):
    """ Serialize numpy array to list """

    if isinstance(value, dict):
        return value
    elif isinstance(value, str):
        import ipdb; ipdb.set_trace()
    else:
        array_data = 'string'
        dtype = value.dtype.name
        if dtype.startswith('int'):
            array_data = 'integer'
        elif dtype.startswith('float'):
            array_data = 'float'

        return {
            'list': value.tolist(),
            'data': array_data,
            'shape': list(value.shape)}


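# Illustrative usage sketch (commentary added for clarity; not in the original
# file): serializers produce JSON-friendly values; serialize_array above, for
# instance, encodes a numpy array as a dict of its data type, shape, and
# nested list:
#
#     serialize_array({}, np.zeros((2, 2)), None)
#     # -> {'list': [[0.0, 0.0], [0.0, 0.0]], 'data': 'float', 'shape': [2, 2]}
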
# ===============================
# Deserialize Functions Overview
# ===============================
# These functions are responsible for converting serialized data back into the
# state based on the schema.
#
# Each function handles a specific type of schema and ensures that the data is
# deserialized correctly.
#
# Function signature: (schema, state, core)


def to_string(schema, value, core=None):
    return str(value)

# def evaluate(schema, encoded, core=None):
#     return eval(encoded)


def deserialize_any(schema, state, core):
    if isinstance(state, dict):
        tree = {}

        for key, value in state.items():
            if is_schema_key(key):
                decoded = value
            else:
                decoded = core.deserialize(
                    schema.get(key, 'any'),
                    value)

            tree[key] = decoded

        for key in non_schema_keys(schema):
            if key not in tree:
                # if key not in state:
                #     decoded = core.default(
                #         schema[key])
                # else:
                if key in state:
                    decoded = core.deserialize(
                        schema[key],
                        state[key])

                    tree[key] = decoded

        return tree

    else:
        return state


def deserialize_tuple(schema, state, core):
    parameters = core._parameters_for(schema)
    result = []

    if isinstance(state, str):
        if (state[0] == '(' and state[-1] == ')') \
                or (state[0] == '[' and state[-1] == ']'):
            state = state[1:-1].split(',')
        else:
            return None

    for parameter, code in zip(parameters, state):
        element = core.deserialize(
            parameter,
            code)

        result.append(element)

    return tuple(result)


def deserialize_union(schema, encoded, core):
    if encoded == NONE_SYMBOL:
        return None
    else:
        parameters = core._parameters_for(schema)

        for parameter in parameters:
            value = core.deserialize(
                parameter,
                encoded)

            if value is not None:
                return value


def deserialize_string(schema, encoded, core=None):
    if isinstance(encoded, str):
        return encoded


def deserialize_integer(schema, encoded, core=None):
    value = None
    try:
        value = int(encoded)
    except:
        pass

    return value


def deserialize_float(schema, encoded, core=None):
    value = None
    try:
        value = float(encoded)
    except:
        pass

    return value


def deserialize_list(schema, encoded, core=None):
    if isinstance(encoded, list):
        element_type = core._find_parameter(
            schema,
            'element')

        return [
            core.deserialize(
                element_type,
                element)
            for element in encoded]


def deserialize_maybe(schema, encoded, core):
    if encoded == NONE_SYMBOL or encoded is None:
        return None
    else:
        value_type = core._find_parameter(
            schema,
            'value')

        return core.deserialize(value_type, encoded)


def deserialize_quote(schema, state, core):
    return state


def deserialize_boolean(schema, encoded, core) -> bool:
    if encoded == 'true':
        return True
    elif encoded == 'false':
        return False
    elif encoded == True or encoded == False:
        return encoded


def deserialize_tree(schema, encoded, core):
    if isinstance(encoded, dict):
        tree = {}
        for key, value in encoded.items():
            if key.startswith('_'):
                tree[key] = value
            else:
                tree[key] = deserialize_tree(schema, value, core)

        return tree

    else:
        leaf_type = core._find_parameter(
            schema,
            'leaf')

        if leaf_type:
            return core.deserialize(
                leaf_type,
                encoded)
        else:
            return encoded


def deserialize_units(schema, encoded, core):
    if isinstance(encoded, Quantity):
        return encoded
    else:
        return units(encoded)


def deserialize_function(schema, value, core=None):
    if isinstance(value, str):
        return local_lookup_module(value)
    else:
        return value


def deserialize_map(schema, encoded, core=None):
    if isinstance(encoded, dict):
        value_type = core._find_parameter(
            schema,
            'value')

        return {
            key: core.deserialize(
                value_type,
                subvalue) if not is_schema_key(key) else subvalue
            for key, subvalue in encoded.items()}


def enum_list(enum_schema):
    return [
        enum_schema[f'_{parameter}']
        for parameter in enum_schema['_type_parameters']]


def deserialize_enum(schema, state, core):
    enum = enum_list(schema)
    if state in enum:
        return state
    else:
        raise Exception(f'{state} not in enum: {enum}')


def deserialize_array(schema, encoded, core):
    if isinstance(encoded, np.ndarray):
        return encoded

    elif isinstance(encoded, dict):
        if 'value' in encoded:
            return encoded['value']
        else:
            found = core.retrieve(
                encoded.get(
                    'data',
                    schema['_data']))

            dtype = read_datatype(
                found)

            shape = read_shape(
                schema['_shape'])

            if 'list' in encoded:
                return np.array(
                    encoded['list'],
                    dtype=dtype).reshape(
                        shape)
            else:
                return np.zeros(
                    tuple(shape),
                    dtype=dtype)


def deserialize_edge(schema, encoded, core):
    return encoded


def recur_deserialize_schema(schema, core, top_state=None, path=None):
    top_state = top_state or schema
    path = path or []

    if isinstance(schema, dict):
        subschema = {}
        for key, value in schema.items():
            subschema[key] = recur_deserialize_schema(
                value,
                core,
                top_state=top_state,
                path=path+[key])

        return subschema

    elif isinstance(schema, list):
        subschema = []
        for index, value in enumerate(schema):
            subschema.append(
                recur_deserialize_schema(
                    value,
                    core,
                    top_state=top_state,
                    path=path+[index]))

        return tuple(subschema)

    elif isinstance(schema, str):
        if schema.startswith('/'):  # this is a reference to another schema
            local_path = schema.split('/')[1:]
            reference = get_path(top_state, local_path)

            set_path(
                tree=top_state,
                path=path,
                value=reference)

            return reference
        else:
            return schema
    else:
        return schema


def deserialize_schema(schema, state, core):
    return recur_deserialize_schema(schema=state, core=core)


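# Illustrative usage sketch (commentary added for clarity; not in the original
# file): the scalar decoders above tolerate bad input and can be exercised
# without a core:
#
#     deserialize_integer(None, '42')          # -> 42
#     deserialize_float(None, 'not a float')   # -> None (failed parse)
#     deserialize_boolean(None, 'true', None)  # -> True
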
# =========================
|
|
1678
|
+
# Slice Functions Overview
|
|
1679
|
+
# =========================
|
|
1680
|
+
# These functions are responsible for extracting a part of the state based on
|
|
1681
|
+
# the schema and path.
|
|
1682
|
+
#
|
|
1683
|
+
# Each function handles a specific type of schema and ensures that the correct
|
|
1684
|
+
# part of the state is sliced.
|
|
1685
|
+
#
|
|
1686
|
+
# Function signature: (schema, state, path, core)
|
|
1687
|
+
|
|
1688
|
+
|
|
1689
|
+
def slice_any(schema, state, path, core):
|
|
1690
|
+
if not isinstance(path, (list, tuple)):
|
|
1691
|
+
if path is None:
|
|
1692
|
+
path = ()
|
|
1693
|
+
else:
|
|
1694
|
+
path = [path]
|
|
1695
|
+
|
|
1696
|
+
if len(path) == 0:
|
|
1697
|
+
return schema, state
|
|
1698
|
+
|
|
1699
|
+
elif len(path) > 0:
|
|
1700
|
+
head = path[0]
|
|
1701
|
+
tail = path[1:]
|
|
1702
|
+
step = None
|
|
1703
|
+
|
|
1704
|
+
if isinstance(state, dict):
|
|
1705
|
+
if head == '*':
|
|
1706
|
+
step_schema = {}
|
|
1707
|
+
step_state = {}
|
|
1708
|
+
|
|
1709
|
+
for key, value in state.items():
|
|
1710
|
+
if key in schema:
|
|
1711
|
+
step_schema[key], step_state[key] = core.slice(
|
|
1712
|
+
schema[key],
|
|
1713
|
+
value,
|
|
1714
|
+
tail)
|
|
1715
|
+
|
|
1716
|
+
else:
|
|
1717
|
+
step_schema[key], step_state[key] = slice_any(
|
|
1718
|
+
{},
|
|
1719
|
+
value,
|
|
1720
|
+
tail,
|
|
1721
|
+
core)
|
|
1722
|
+
|
|
1723
|
+
return step_schema, step_state
|
|
1724
|
+
|
|
1725
|
+
elif head not in state:
|
|
1726
|
+
state[head] = core.default(
|
|
1727
|
+
schema.get(head))
|
|
1728
|
+
|
|
1729
|
+
step = state[head]
|
|
1730
|
+
|
|
1731
|
+
elif isinstance(head, str) and hasattr(state, head):
|
|
1732
|
+
step = getattr(state, head)
|
|
1733
|
+
|
|
1734
|
+
if head in schema:
|
|
1735
|
+
return core.slice(
|
|
1736
|
+
schema[head],
|
|
1737
|
+
step,
|
|
1738
|
+
tail)
|
|
1739
|
+
else:
|
|
1740
|
+
return slice_any(
|
|
1741
|
+
{},
|
|
1742
|
+
step,
|
|
1743
|
+
tail,
|
|
1744
|
+
core)
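# --- illustrative sketch (not part of the module) ---
# A slice function walks a path into the state while carrying the matching
# part of the schema along with it. This minimal standalone version shows the
# idea for plain nested dicts only; the real slice_* functions defer to
# core.slice so each type controls its own descent.
def slice_plain(schema, state, path):
    if not path:
        return schema, state
    head, tail = path[0], path[1:]
    return slice_plain(
        schema.get(head, {}),
        state.get(head, {}),
        tail)
example_schema = {'cell': {'volume': {'_type': 'float'}}}
example_state = {'cell': {'volume': 1.5}}
assert slice_plain(example_schema, example_state, ['cell', 'volume']) == (
    {'_type': 'float'}, 1.5)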
|
|
1745
|
+
|
|
1746
|
+
|
|
1747
|
+
def slice_tuple(schema, state, path, core):
|
|
1748
|
+
if len(path) > 0:
|
|
1749
|
+
head = path[0]
|
|
1750
|
+
tail = path[1:]
|
|
1751
|
+
|
|
1752
|
+
if head == '*':
|
|
1753
|
+
result_schema = {}
|
|
1754
|
+
result_state = {}
|
|
1755
|
+
for index, position in enumerate(schema['_type_parameters']):
|
|
1756
|
+
result_schema[position], result_state[position] = core.slice(
|
|
1757
|
+
schema[f'_{position}'],
|
|
1758
|
+
state[index],
|
|
1759
|
+
tail)
|
|
1760
|
+
return result_schema, result_state
|
|
1761
|
+
elif str(head) in schema['_type_parameters']:
|
|
1762
|
+
try:
|
|
1763
|
+
index = schema['_type_parameters'].index(str(head))
|
|
1764
|
+
except:
|
|
1765
|
+
raise Exception('\n '.join([
|
|
1766
|
+
f'step {head} in path {path} is not a type parameter of',
|
|
1767
|
+
f'schema: {pf(schema)}',
|
|
1768
|
+
f'state: {pf(state)}']))
|
|
1769
|
+
index_key = f'_{index}'
|
|
1770
|
+
subschema = core.access(schema[index_key])
|
|
1771
|
+
|
|
1772
|
+
return core.slice(subschema, state[head], tail)
|
|
1773
|
+
else:
|
|
1774
|
+
raise Exception(
|
|
1775
|
+
'trying to index a tuple with a key that is '
|
|
1776
|
+
f'not an index: {state} {head}')
|
|
1777
|
+
else:
|
|
1778
|
+
return schema, state
|
|
1779
|
+
|
|
1780
|
+
|
|
1781
|
+
def slice_union(schema, state, path, core):
|
|
1782
|
+
union_type = find_union_type(
|
|
1783
|
+
core,
|
|
1784
|
+
schema,
|
|
1785
|
+
state)
|
|
1786
|
+
|
|
1787
|
+
return core.slice(
|
|
1788
|
+
union_type,
|
|
1789
|
+
state,
|
|
1790
|
+
path)
|
|
1791
|
+
|
|
1792
|
+
|
|
1793
|
+
def slice_list(schema, state, path, core):
|
|
1794
|
+
element_type = core._find_parameter(
|
|
1795
|
+
schema,
|
|
1796
|
+
'element')
|
|
1797
|
+
|
|
1798
|
+
if len(path) > 0:
|
|
1799
|
+
head = path[0]
|
|
1800
|
+
tail = path[1:]
|
|
1801
|
+
|
|
1802
|
+
if not isinstance(head, int) or head >= len(state):
|
|
1803
|
+
raise Exception(f'bad index for list: {path} for {state}')
|
|
1804
|
+
|
|
1805
|
+
step = state[head]
|
|
1806
|
+
return core.slice(element_type, step, tail)
|
|
1807
|
+
else:
|
|
1808
|
+
return schema, state
|
|
1809
|
+
|
|
1810
|
+
|
|
1811
|
+
def slice_tree(schema, state, path, core):
|
|
1812
|
+
leaf_type = core._find_parameter(
|
|
1813
|
+
schema,
|
|
1814
|
+
'leaf')
|
|
1815
|
+
|
|
1816
|
+
if len(path) > 0:
|
|
1817
|
+
head = path[0]
|
|
1818
|
+
tail = path[1:]
|
|
1819
|
+
|
|
1820
|
+
if head == '*':
|
|
1821
|
+
slice_schema = {}
|
|
1822
|
+
slice_state = {}
|
|
1823
|
+
for key, value in state.items():
|
|
1824
|
+
if core.check(leaf_type, value):
|
|
1825
|
+
slice_schema[key], slice_state[key] = core.slice(
|
|
1826
|
+
leaf_type,
|
|
1827
|
+
value,
|
|
1828
|
+
tail)
|
|
1829
|
+
else:
|
|
1830
|
+
slice_schema[key], slice_state[key] = core.slice(
|
|
1831
|
+
schema,
|
|
1832
|
+
value,
|
|
1833
|
+
tail)
|
|
1834
|
+
|
|
1835
|
+
return slice_schema, slice_state
|
|
1836
|
+
|
|
1837
|
+
if not state:
|
|
1838
|
+
default = core.default(
|
|
1839
|
+
leaf_type)
|
|
1840
|
+
try:
|
|
1841
|
+
down_schema, down_state = core.slice(
|
|
1842
|
+
leaf_type,
|
|
1843
|
+
default,
|
|
1844
|
+
path)
|
|
1845
|
+
|
|
1846
|
+
if down_state:
|
|
1847
|
+
return down_schema, down_state
|
|
1848
|
+
except:
|
|
1849
|
+
state = {}
|
|
1850
|
+
if not head in state:
|
|
1851
|
+
state[head] = {}
|
|
1852
|
+
|
|
1853
|
+
step = state[head]
|
|
1854
|
+
if core.check(leaf_type, step):
|
|
1855
|
+
return core.slice(leaf_type, step, tail)
|
|
1856
|
+
else:
|
|
1857
|
+
return core.slice(schema, step, tail)
|
|
1858
|
+
else:
|
|
1859
|
+
return schema, state
|
|
1860
|
+
|
|
1861
|
+
|
|
1862
|
+
def slice_edge(schema, state, path, core):
|
|
1863
|
+
if len(path) > 0:
|
|
1864
|
+
head = path[0]
|
|
1865
|
+
tail = path[1:]
|
|
1866
|
+
|
|
1867
|
+
if head == '_inputs' or head == '_outputs':
|
|
1868
|
+
pass
|
|
1869
|
+
|
|
1870
|
+
return slice_any(schema, state, path, core)
|
|
1871
|
+
else:
|
|
1872
|
+
return schema, state
|
|
1873
|
+
|
|
1874
|
+
|
|
1875
|
+
def slice_map(schema, state, path, core):
|
|
1876
|
+
value_type = core._find_parameter(
|
|
1877
|
+
schema,
|
|
1878
|
+
'value')
|
|
1879
|
+
|
|
1880
|
+
if len(path) > 0:
|
|
1881
|
+
head = path[0]
|
|
1882
|
+
tail = path[1:]
|
|
1883
|
+
|
|
1884
|
+
if head == '*':
|
|
1885
|
+
slice_schema = {'_type': 'map'}
|
|
1886
|
+
slice_state = {}
|
|
1887
|
+
|
|
1888
|
+
for key, value in state.items():
|
|
1889
|
+
tail_schema, slice_state[key] = core.slice(
|
|
1890
|
+
value_type,
|
|
1891
|
+
value,
|
|
1892
|
+
tail)
|
|
1893
|
+
|
|
1894
|
+
if not '_value' in slice_schema:
|
|
1895
|
+
slice_schema['_value'] = tail_schema
|
|
1896
|
+
else:
|
|
1897
|
+
slice_schema['_value'] = core.resolve_schemas(
|
|
1898
|
+
slice_schema['_value'],
|
|
1899
|
+
tail_schema)
|
|
1900
|
+
|
|
1901
|
+
return slice_schema, slice_state
|
|
1902
|
+
|
|
1903
|
+
if not head in state:
|
|
1904
|
+
state[head] = core.default(
|
|
1905
|
+
value_type)
|
|
1906
|
+
|
|
1907
|
+
step = state[head]
|
|
1908
|
+
return core.slice(
|
|
1909
|
+
value_type,
|
|
1910
|
+
step,
|
|
1911
|
+
tail)
|
|
1912
|
+
else:
|
|
1913
|
+
return schema, state
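# --- illustrative sketch (not part of the module) ---
# With a '*' step, slice_map slices every entry of the map with the remaining
# path and accumulates a per-key result, widening the value schema as it goes.
# This sketch shows only the shape of that behaviour for plain dicts; it does
# not use the core's resolve_schemas machinery.
def slice_map_star(value_schema, state, tail):
    sliced = {}
    for key, value in state.items():
        node = value
        for step in tail:
            node = node[step]
        sliced[key] = node
    return {'_type': 'map', '_value': value_schema}, sliced
example_state = {
    'a': {'count': 1},
    'b': {'count': 2}}
star_schema, counts = slice_map_star({'count': 'integer'}, example_state, ['count'])
assert counts == {'a': 1, 'b': 2}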
|
|
1914
|
+
|
|
1915
|
+
|
|
1916
|
+
def slice_maybe(schema, state, path, core):
|
|
1917
|
+
if state is None:
|
|
1918
|
+
return schema, None
|
|
1919
|
+
|
|
1920
|
+
else:
|
|
1921
|
+
value_type = core._find_parameter(
|
|
1922
|
+
schema,
|
|
1923
|
+
'value')
|
|
1924
|
+
|
|
1925
|
+
return core.slice(
|
|
1926
|
+
value_type,
|
|
1927
|
+
state,
|
|
1928
|
+
path)
|
|
1929
|
+
|
|
1930
|
+
|
|
1931
|
+
def slice_array(schema, state, path, core):
|
|
1932
|
+
if len(path) > 0:
|
|
1933
|
+
head = path[0]
|
|
1934
|
+
tail = path[1:]
|
|
1935
|
+
if isinstance(head, str):
|
|
1936
|
+
head = int(head)
|
|
1937
|
+
step = state[head]
|
|
1938
|
+
|
|
1939
|
+
if isinstance(step, np.ndarray):
|
|
1940
|
+
sliceschema = schema.copy()
|
|
1941
|
+
sliceschema['_shape'] = step.shape
|
|
1942
|
+
return core.slice(
|
|
1943
|
+
sliceschema,
|
|
1944
|
+
step,
|
|
1945
|
+
tail)
|
|
1946
|
+
else:
|
|
1947
|
+
data_type = core._find_parameter(
|
|
1948
|
+
schema,
|
|
1949
|
+
'data')
|
|
1950
|
+
|
|
1951
|
+
return core.slice(
|
|
1952
|
+
data_type,
|
|
1953
|
+
step,
|
|
1954
|
+
tail)
|
|
1955
|
+
|
|
1956
|
+
else:
|
|
1957
|
+
return schema, state
|
|
1958
|
+
|
|
1959
|
+
|
|
1960
|
+
def slice_string(schema, state, path, core):
|
|
1961
|
+
raise Exception(f'cannot slice into a string: {path}\n{state}\n{schema}')
|
|
1962
|
+
|
|
1963
|
+
|
|
1964
|
+
|
|
1965
|
+
# ========================
|
|
1966
|
+
# Bind Functions Overview
|
|
1967
|
+
# ========================
|
|
1968
|
+
# These functions are responsible for binding a key and its corresponding
|
|
1969
|
+
# schema and state to the main schema and state.
|
|
1970
|
+
#
|
|
1971
|
+
# Each function handles a specific type of schema and ensures that the binding
|
|
1972
|
+
# is done correctly.
|
|
1973
|
+
#
|
|
1974
|
+
# Function signature: (schema, state, key, subschema, substate, core)
|
|
1975
|
+
|
|
1976
|
+
|
|
1977
|
+
def bind_any(schema, state, key, subschema, substate, core):
|
|
1978
|
+
result_schema = core.resolve_schemas(
|
|
1979
|
+
schema,
|
|
1980
|
+
{key: subschema})
|
|
1981
|
+
|
|
1982
|
+
if state is None:
|
|
1983
|
+
state = {}
|
|
1984
|
+
|
|
1985
|
+
state[key] = substate
|
|
1986
|
+
|
|
1987
|
+
return result_schema, state
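# --- illustrative sketch (not part of the module) ---
# Binding attaches a new key (with its own schema and state) to an existing
# node. A minimal dict-only version looks like this; the real bind_any also
# resolves the subschema against the parent schema via core.resolve_schemas.
def bind_plain(schema, state, key, subschema, substate):
    bound_schema = dict(schema, **{key: subschema})
    bound_state = dict(state or {})
    bound_state[key] = substate
    return bound_schema, bound_state
bound_schema, bound_state = bind_plain(
    {'volume': 'float'}, {'volume': 1.0},
    'mass', 'float', 2.5)
assert bound_schema == {'volume': 'float', 'mass': 'float'}
assert bound_state == {'volume': 1.0, 'mass': 2.5}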
|
|
1988
|
+
|
|
1989
|
+
|
|
1990
|
+
def bind_tuple(schema, state, key, subschema, substate, core):
|
|
1991
|
+
new_schema = schema.copy()
|
|
1992
|
+
new_schema[f'_{key}'] = subschema
|
|
1993
|
+
open = list(state)
|
|
1994
|
+
open[key] = substate
|
|
1995
|
+
|
|
1996
|
+
return new_schema, tuple(open)
|
|
1997
|
+
|
|
1998
|
+
|
|
1999
|
+
def bind_union(schema, state, key, subschema, substate, core):
|
|
2000
|
+
union_type = find_union_type(
|
|
2001
|
+
core,
|
|
2002
|
+
schema,
|
|
2003
|
+
state)
|
|
2004
|
+
|
|
2005
|
+
return core.bind(
|
|
2006
|
+
union_type,
|
|
2007
|
+
state,
|
|
2008
|
+
key,
|
|
2009
|
+
subschema,
|
|
2010
|
+
substate)
|
|
2011
|
+
|
|
2012
|
+
|
|
2013
|
+
def bind_enum(schema, state, key, subschema, substate, core):
|
|
2014
|
+
new_schema = schema.copy()
|
|
2015
|
+
new_schema[f'_{key}'] = subschema
|
|
2016
|
+
open = list(state)
|
|
2017
|
+
open[key] = substate
|
|
2018
|
+
|
|
2019
|
+
return new_schema, tuple(open)
|
|
2020
|
+
|
|
2021
|
+
|
|
2022
|
+
def bind_array(schema, state, key, subschema, substate, core):
|
|
2023
|
+
if state is None:
|
|
2024
|
+
state = core.default(schema)
|
|
2025
|
+
if isinstance(key, str):
|
|
2026
|
+
key = int(key)
|
|
2027
|
+
state[key] = substate
|
|
2028
|
+
|
|
2029
|
+
return schema, state
|
|
2030
|
+
|
|
2031
|
+
|
|
2032
|
+
# ==========================
|
|
2033
|
+
# Resolve Functions Overview
|
|
2034
|
+
# ==========================
|
|
2035
|
+
# These functions are responsible for resolving updates to the schema.
|
|
2036
|
+
#
|
|
2037
|
+
# Each function handles a specific type of schema and ensures that updates are
|
|
2038
|
+
# resolved correctly.
|
|
2039
|
+
#
|
|
2040
|
+
# Function signature: (schema, update, core)
|
|
2041
|
+
|
|
2042
|
+
|
|
2043
|
+
def resolve_maybe(schema, update, core):
|
|
2044
|
+
value_schema = core._find_parameter(
|
|
2045
|
+
schema,
|
|
2046
|
+
'value')
|
|
2047
|
+
|
|
2048
|
+
inner_value = core.resolve_schemas(
|
|
2049
|
+
value_schema,
|
|
2050
|
+
update)
|
|
2051
|
+
|
|
2052
|
+
schema['_value'] = inner_value
return schema
|
|
2053
|
+
|
|
2054
|
+
|
|
2055
|
+
def resolve_map(schema, update, core):
|
|
2056
|
+
if isinstance(update, dict):
|
|
2057
|
+
value_schema = update.get(
|
|
2058
|
+
'_value',
|
|
2059
|
+
schema.get('_value', {}))
|
|
2060
|
+
|
|
2061
|
+
for key, subschema in update.items():
|
|
2062
|
+
if not is_schema_key(key):
|
|
2063
|
+
value_schema = core.resolve_schemas(
|
|
2064
|
+
value_schema,
|
|
2065
|
+
subschema)
|
|
2066
|
+
|
|
2067
|
+
schema['_type'] = update.get(
|
|
2068
|
+
'_type',
|
|
2069
|
+
schema.get('_type', 'map'))
|
|
2070
|
+
schema['_value'] = value_schema
|
|
2071
|
+
|
|
2072
|
+
return schema
|
|
2073
|
+
|
|
2074
|
+
|
|
2075
|
+
def resolve_array(schema, update, core):
|
|
2076
|
+
if not '_shape' in schema:
|
|
2077
|
+
schema = core.access(schema)
|
|
2078
|
+
if not '_shape' in schema:
|
|
2079
|
+
raise Exception(f'array must have a "_shape" key, not {schema}')
|
|
2080
|
+
|
|
2081
|
+
data_schema = schema.get('_data', {})
|
|
2082
|
+
|
|
2083
|
+
if '_type' in update:
|
|
2084
|
+
data_schema = core.resolve_schemas(
|
|
2085
|
+
data_schema,
|
|
2086
|
+
update.get('_data', {}))
|
|
2087
|
+
|
|
2088
|
+
if update['_type'] == 'array':
|
|
2089
|
+
if '_shape' in update:
|
|
2090
|
+
if update['_shape'] != schema['_shape']:
|
|
2091
|
+
raise Exception('\n '.join([
|
|
2092
|
+
'arrays must be of the same shape, not',
|
|
2093
|
+
f'{schema}\nand',
|
|
2094
|
+
f'{update}']))
|
|
2095
|
+
|
|
2096
|
+
elif core.inherits_from(update, schema):
|
|
2097
|
+
schema.update(update)
|
|
2098
|
+
|
|
2099
|
+
elif not core.inherits_from(schema, update):
|
|
2100
|
+
raise Exception('\n '.join([
|
|
2101
|
+
'cannot resolve incompatible array schemas:',
|
|
2102
|
+
f'{schema}',
|
|
2103
|
+
f'{update}']))
|
|
2104
|
+
|
|
2105
|
+
else:
|
|
2106
|
+
for key, subschema in update.items():
|
|
2107
|
+
if isinstance(key, int):
|
|
2108
|
+
key = (key,)
|
|
2109
|
+
|
|
2110
|
+
if len(key) > len(schema['_shape']):
|
|
2111
|
+
raise Exception('\n'.join([
|
|
2112
|
+
f'key is longer than array dimension: {key}',
|
|
2113
|
+
f'{schema}',
|
|
2114
|
+
f'{update}']))
|
|
2115
|
+
elif len(key) == len(schema['_shape']):
|
|
2116
|
+
data_schema = core.resolve_schemas(
|
|
2117
|
+
data_schema,
|
|
2118
|
+
subschema)
|
|
2119
|
+
else:
|
|
2120
|
+
shape = tuple_from_type(
|
|
2121
|
+
schema['_shape'])
|
|
2122
|
+
|
|
2123
|
+
subshape = shape[len(key):]
|
|
2124
|
+
inner_schema = schema.copy()
|
|
2125
|
+
inner_schema['_shape'] = subshape
|
|
2126
|
+
inner_schema = core.resolve_schemas(
|
|
2127
|
+
inner_schema,
|
|
2128
|
+
subschema)
|
|
2129
|
+
|
|
2130
|
+
data_schema = inner_schema['_data']
|
|
2131
|
+
|
|
2132
|
+
schema['_data'] = data_schema
|
|
2133
|
+
|
|
2134
|
+
return schema
|
|
2135
|
+
|
|
2136
|
+
|
|
2137
|
+
def resolve_any(schema, update, core):
|
|
2138
|
+
if not schema or schema == 'any':
|
|
2139
|
+
return update
|
|
2140
|
+
if not update or update == 'any':
|
|
2141
|
+
return schema
|
|
2142
|
+
|
|
2143
|
+
if isinstance(schema, str):
|
|
2144
|
+
schema = core.access(schema)
|
|
2145
|
+
|
|
2146
|
+
outcome = schema.copy()
|
|
2147
|
+
|
|
2148
|
+
for key, subschema in update.items():
|
|
2149
|
+
if key == '_type' and key in outcome:
|
|
2150
|
+
if schema[key] != subschema:
|
|
2151
|
+
if core.inherits_from(schema[key], subschema):
|
|
2152
|
+
continue
|
|
2153
|
+
elif core.inherits_from(subschema, schema[key]):
|
|
2154
|
+
outcome[key] = subschema
|
|
2155
|
+
else:
|
|
2156
|
+
raise Exception('\n'.join([
|
|
2157
|
+
'cannot resolve types when updating',
|
|
2158
|
+
f'current type: {schema}',
|
|
2159
|
+
f'update type: {update}']))
|
|
2160
|
+
|
|
2161
|
+
elif not key in schema or type_parameter_key(schema, key):
|
|
2162
|
+
if subschema:
|
|
2163
|
+
outcome[key] = subschema
|
|
2164
|
+
else:
|
|
2165
|
+
outcome[key] = core.resolve_schemas(
|
|
2166
|
+
schema.get(key),
|
|
2167
|
+
subschema)
|
|
2168
|
+
|
|
2169
|
+
return outcome
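# --- illustrative sketch (not part of the module) ---
# Resolving two schemas merges their keys and fails when they disagree about
# '_type'. The real resolve_any also consults the inheritance hierarchy via
# core.inherits_from; this sketch only shows the merge-or-conflict skeleton.
def resolve_plain(schema, update):
    if not schema:
        return update
    if not update:
        return schema
    outcome = dict(schema)
    for key, value in update.items():
        if key == '_type' and key in outcome and outcome[key] != value:
            raise Exception(
                f'cannot resolve {outcome[key]} with {value}')
        outcome[key] = value
    return outcome
merged = resolve_plain(
    {'_type': 'map', '_value': 'float'},
    {'_value': 'float', '_description': 'concentrations'})
assert merged['_description'] == 'concentrations'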
|
|
2170
|
+
|
|
2171
|
+
|
|
2172
|
+
def resolve_union(schema, update, core):
|
|
2173
|
+
if '_type' in schema and schema['_type'] == 'union':
|
|
2174
|
+
union_type, resolve_type = schema, update
|
|
2175
|
+
elif '_type' in update and update['_type'] == 'union':
|
|
2176
|
+
union_type, resolve_type = update, schema
|
|
2177
|
+
else:
|
|
2178
|
+
raise Exception(f'neither schema is a union:\n{schema}\n{update}')
|
|
2179
|
+
|
|
2180
|
+
if '_type_parameters' in union_type:
|
|
2181
|
+
parameters = union_type['_type_parameters']
|
|
2182
|
+
else:
|
|
2183
|
+
raise Exception(f'no type parameters in union?\n{union_type}')
|
|
2184
|
+
|
|
2185
|
+
for parameter in parameters:
|
|
2186
|
+
parameter_key = f'_{parameter}'
|
|
2187
|
+
parameter_type = union_type[parameter_key]
|
|
2188
|
+
try:
|
|
2189
|
+
resolved = core.resolve(
|
|
2190
|
+
parameter_type,
|
|
2191
|
+
resolve_type)
|
|
2192
|
+
return union_type
|
|
2193
|
+
except Exception as e:
|
|
2194
|
+
pass
|
|
2195
|
+
|
|
2196
|
+
raise Exception('\n'.join([
|
|
2197
|
+
'could not resolve type with union:',
|
|
2198
|
+
f'{update}',
|
|
2199
|
+
f'union:',
|
|
2200
|
+
f'{schema}']))
|
|
2201
|
+
|
|
2202
|
+
|
|
2203
|
+
def resolve_tree(schema, update, core):
|
|
2204
|
+
if not schema or schema == 'any':
|
|
2205
|
+
return update
|
|
2206
|
+
if not update or update == 'any':
|
|
2207
|
+
return schema
|
|
2208
|
+
|
|
2209
|
+
outcome = schema.copy()
|
|
2210
|
+
|
|
2211
|
+
for key, subschema in update.items():
|
|
2212
|
+
if key == '_type' and key in outcome:
|
|
2213
|
+
if outcome[key] != subschema:
|
|
2214
|
+
if core.inherits_from(outcome[key], subschema):
|
|
2215
|
+
continue
|
|
2216
|
+
elif core.inherits_from(subschema, outcome[key]):
|
|
2217
|
+
outcome[key] = subschema
|
|
2218
|
+
else:
|
|
2219
|
+
leaf_type = core._find_parameter(
|
|
2220
|
+
schema,
|
|
2221
|
+
'leaf')
|
|
2222
|
+
|
|
2223
|
+
return core.resolve(
|
|
2224
|
+
leaf_type,
|
|
2225
|
+
update)
|
|
2226
|
+
|
|
2227
|
+
# raise Exception(f'cannot resolve types when updating\ncurrent type: {schema}\nupdate type: {update}')
|
|
2228
|
+
|
|
2229
|
+
elif not key in outcome or type_parameter_key(update, key):
|
|
2230
|
+
if subschema:
|
|
2231
|
+
outcome[key] = subschema
|
|
2232
|
+
else:
|
|
2233
|
+
outcome[key] = core.resolve_schemas(
|
|
2234
|
+
outcome.get(key),
|
|
2235
|
+
subschema)
|
|
2236
|
+
|
|
2237
|
+
return outcome
|
|
2238
|
+
|
|
2239
|
+
|
|
2240
|
+
# ============================
|
|
2241
|
+
# Dataclass Functions Overview
|
|
2242
|
+
# ============================
|
|
2243
|
+
# These functions are responsible for generating dataclass representations of
|
|
2244
|
+
# various types of schemas.
|
|
2245
|
+
#
|
|
2246
|
+
# Each function handles a specific type of schema and ensures that the
|
|
2247
|
+
# dataclass is generated correctly.
|
|
2248
|
+
#
|
|
2249
|
+
# Function signature: (schema, path, core)
|
|
2250
|
+
|
|
2251
|
+
|
|
2252
|
+
def dataclass_any(schema, path, core):
|
|
2253
|
+
parts = path
|
|
2254
|
+
if not parts:
|
|
2255
|
+
parts = ['top']
|
|
2256
|
+
dataclass_name = '_'.join(parts)
|
|
2257
|
+
|
|
2258
|
+
if isinstance(schema, dict):
|
|
2259
|
+
type_name = schema.get('_type', 'any')
|
|
2260
|
+
|
|
2261
|
+
branches = {}
|
|
2262
|
+
for key, subschema in schema.items():
|
|
2263
|
+
if not key.startswith('_'):
|
|
2264
|
+
branch = core.dataclass(
|
|
2265
|
+
subschema,
|
|
2266
|
+
path + [key])
|
|
2267
|
+
|
|
2268
|
+
def default(subschema=subschema):
|
|
2269
|
+
return core.default(subschema)
|
|
2270
|
+
|
|
2271
|
+
branches[key] = (
|
|
2272
|
+
key,
|
|
2273
|
+
branch,
|
|
2274
|
+
field(default_factory=default))
|
|
2275
|
+
|
|
2276
|
+
dataclass = make_dataclass(
|
|
2277
|
+
dataclass_name,
|
|
2278
|
+
branches.values(),
|
|
2279
|
+
# TODO: use module= here instead?
|
|
2280
|
+
namespace={
|
|
2281
|
+
'__module__': 'bigraph_schema.data'})
|
|
2282
|
+
|
|
2283
|
+
setattr(
|
|
2284
|
+
data_module,
|
|
2285
|
+
dataclass_name,
|
|
2286
|
+
dataclass)
|
|
2287
|
+
|
|
2288
|
+
else:
|
|
2289
|
+
schema = core.access(schema)
|
|
2290
|
+
dataclass = core.dataclass(schema, path)
|
|
2291
|
+
|
|
2292
|
+
return dataclass
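# --- illustrative sketch (not part of the module) ---
# dataclass_any builds a dataclass whose fields mirror the non-underscore keys
# of the schema, each with a default_factory that produces that key's default
# value. The standard-library calls below are the same ones used above; the
# field names and defaults are made up for the example.
from dataclasses import field as dc_field, make_dataclass as dc_make
example_defaults = {'volume': 0.0, 'divisions': 0}
Cell = dc_make(
    'Cell',
    [(key, type(value), dc_field(default_factory=lambda value=value: value))
     for key, value in example_defaults.items()])
cell = Cell()
assert cell.volume == 0.0 and cell.divisions == 0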
|
|
2293
|
+
|
|
2294
|
+
|
|
2295
|
+
def dataclass_tuple(schema, path, core):
|
|
2296
|
+
parameters = type_parameters_for(schema)
|
|
2297
|
+
subtypes = []
|
|
2298
|
+
|
|
2299
|
+
for index, key in enumerate(schema['_type_parameters']):
|
|
2300
|
+
subschema = schema.get(f'_{key}', 'any')
|
|
2301
|
+
subtype = core.dataclass(
|
|
2302
|
+
subschema,
|
|
2303
|
+
path + [index])
|
|
2304
|
+
|
|
2305
|
+
subtypes.append(subtype)
|
|
2306
|
+
|
|
2307
|
+
parameter_block = ', '.join(subtypes)
|
|
2308
|
+
return eval(f'tuple[{parameter_block}]')
|
|
2309
|
+
|
|
2310
|
+
|
|
2311
|
+
def dataclass_union(schema, path, core):
|
|
2312
|
+
parameters = type_parameters_for(schema)
|
|
2313
|
+
subtypes = []
|
|
2314
|
+
for parameter in parameters:
|
|
2315
|
+
dataclass = core.dataclass(
|
|
2316
|
+
parameter,
|
|
2317
|
+
path)
|
|
2318
|
+
|
|
2319
|
+
if isinstance(dataclass, str):
|
|
2320
|
+
subtypes.append(dataclass)
|
|
2321
|
+
elif isinstance(dataclass, type):
|
|
2322
|
+
subtypes.append(dataclass.__name__)
|
|
2323
|
+
else:
|
|
2324
|
+
subtypes.append(str(dataclass))
|
|
2325
|
+
|
|
2326
|
+
parameter_block = ', '.join(subtypes)
|
|
2327
|
+
return eval(f'Union[{parameter_block}]')
|
|
2328
|
+
|
|
2329
|
+
|
|
2330
|
+
def dataclass_float(schema, path, core):
|
|
2331
|
+
return float
|
|
2332
|
+
|
|
2333
|
+
|
|
2334
|
+
def dataclass_integer(schema, path, core):
|
|
2335
|
+
return int
|
|
2336
|
+
|
|
2337
|
+
|
|
2338
|
+
def dataclass_list(schema, path, core):
|
|
2339
|
+
element_type = core._find_parameter(
|
|
2340
|
+
schema,
|
|
2341
|
+
'element')
|
|
2342
|
+
|
|
2343
|
+
dataclass = core.dataclass(
|
|
2344
|
+
element_type,
|
|
2345
|
+
path + ['element'])
|
|
2346
|
+
|
|
2347
|
+
return list[dataclass]
|
|
2348
|
+
|
|
2349
|
+
|
|
2350
|
+
def dataclass_tree(schema, path, core):
|
|
2351
|
+
leaf_type = core._find_parameter(schema, 'leaf')
|
|
2352
|
+
leaf_dataclass = core.dataclass(leaf_type, path + ['leaf'])
|
|
2353
|
+
|
|
2354
|
+
# TODO: find a more direct/non-eval way to do this
|
|
2355
|
+
dataclass_name = '_'.join(path)
|
|
2356
|
+
block = f"NewType('{dataclass_name}', " \
|
|
2357
|
+
f"Union[{leaf_dataclass}, " \
|
|
2358
|
+
f"Mapping[str, '{dataclass_name}']])"
|
|
2359
|
+
|
|
2360
|
+
dataclass = eval(block, {
|
|
2361
|
+
'typing': typing, # Add typing to the context
|
|
2362
|
+
'NewType': NewType,
|
|
2363
|
+
'Union': Union,
|
|
2364
|
+
'Mapping': Mapping,
|
|
2365
|
+
'List': List,
|
|
2366
|
+
'Dict': Dict,
|
|
2367
|
+
'Optional': Optional,
|
|
2368
|
+
'str': str
|
|
2369
|
+
})
|
|
2370
|
+
setattr(data_module, dataclass_name, dataclass)
|
|
2371
|
+
|
|
2372
|
+
return dataclass
|
|
2373
|
+
|
|
2374
|
+
|
|
2375
|
+
def dataclass_map(schema, path, core):
|
|
2376
|
+
value_type = core._find_parameter(
|
|
2377
|
+
schema,
|
|
2378
|
+
'value')
|
|
2379
|
+
|
|
2380
|
+
dataclass = core.dataclass(
|
|
2381
|
+
value_type,
|
|
2382
|
+
path + ['value'])
|
|
2383
|
+
|
|
2384
|
+
return Mapping[str, dataclass]
|
|
2385
|
+
|
|
2386
|
+
|
|
2387
|
+
def dataclass_maybe(schema, path, core):
|
|
2388
|
+
value_type = core._find_parameter(
|
|
2389
|
+
schema,
|
|
2390
|
+
'value')
|
|
2391
|
+
|
|
2392
|
+
dataclass = core.dataclass(
|
|
2393
|
+
value_type,
|
|
2394
|
+
path + ['value'])
|
|
2395
|
+
|
|
2396
|
+
return Optional[dataclass]
|
|
2397
|
+
|
|
2398
|
+
|
|
2399
|
+
def dataclass_edge(schema, path, core):
|
|
2400
|
+
inputs = schema.get('_inputs', {})
|
|
2401
|
+
inputs_dataclass = core.dataclass(
|
|
2402
|
+
inputs,
|
|
2403
|
+
path + ['inputs'])
|
|
2404
|
+
|
|
2405
|
+
outputs = schema.get('_outputs', {})
|
|
2406
|
+
outputs_dataclass = core.dataclass(
|
|
2407
|
+
outputs,
|
|
2408
|
+
path + ['outputs'])
|
|
2409
|
+
|
|
2410
|
+
return Callable[[inputs_dataclass], outputs_dataclass]
|
|
2411
|
+
|
|
2412
|
+
|
|
2413
|
+
def dataclass_boolean(schema, path, core):
|
|
2414
|
+
return bool
|
|
2415
|
+
|
|
2416
|
+
|
|
2417
|
+
def dataclass_string(schema, path, core):
|
|
2418
|
+
return str
|
|
2419
|
+
|
|
2420
|
+
|
|
2421
|
+
def dataclass_enum(schema, path, core):
|
|
2422
|
+
parameters = type_parameters_for(schema)
|
|
2423
|
+
subtypes = []
|
|
2424
|
+
|
|
2425
|
+
for index, key in enumerate(schema['_type_parameters']):
|
|
2426
|
+
subschema = schema.get(key, 'any')
|
|
2427
|
+
subtype = core.dataclass(
|
|
2428
|
+
subschema,
|
|
2429
|
+
path + [index])
|
|
2430
|
+
|
|
2431
|
+
subtypes.append(subtype)
|
|
2432
|
+
|
|
2433
|
+
parameter_block = ', '.join(subtypes)
|
|
2434
|
+
return eval(f'tuple[{parameter_block}]')
|
|
2435
|
+
|
|
2436
|
+
|
|
2437
|
+
def dataclass_array(schema, path, core):
|
|
2438
|
+
return np.ndarray
|
|
2439
|
+
|
|
2440
|
+
|
|
2441
|
+
# ===========================
|
|
2442
|
+
# Default Functions Overview
|
|
2443
|
+
# ===========================
|
|
2444
|
+
# These functions are responsible for providing default values for various
|
|
2445
|
+
# types of schemas.
|
|
2446
|
+
#
|
|
2447
|
+
# Each function handles a specific type of schema and ensures that the
|
|
2448
|
+
# default value is generated correctly.
|
|
2449
|
+
#
|
|
2450
|
+
# Absent a default function, the type could provide a default value directly.
|
|
2451
|
+
|
|
2452
|
+
|
|
2453
|
+
def default_any(schema, core):
|
|
2454
|
+
default = {}
|
|
2455
|
+
|
|
2456
|
+
for key, subschema in schema.items():
|
|
2457
|
+
if not is_schema_key(key):
|
|
2458
|
+
default[key] = core.default(
|
|
2459
|
+
subschema)
|
|
2460
|
+
|
|
2461
|
+
return default
|
|
2462
|
+
|
|
2463
|
+
|
|
2464
|
+
def default_tuple(schema, core):
|
|
2465
|
+
parts = []
|
|
2466
|
+
for parameter in schema['_type_parameters']:
|
|
2467
|
+
subschema = schema[f'_{parameter}']
|
|
2468
|
+
part = core.default(subschema)
|
|
2469
|
+
parts.append(part)
|
|
2470
|
+
|
|
2471
|
+
return tuple(parts)
|
|
2472
|
+
|
|
2473
|
+
|
|
2474
|
+
def default_union(schema, core):
|
|
2475
|
+
final_parameter = schema['_type_parameters'][-1]
|
|
2476
|
+
subschema = schema[f'_{final_parameter}']
|
|
2477
|
+
|
|
2478
|
+
return core.default(subschema)
|
|
2479
|
+
|
|
2480
|
+
|
|
2481
|
+
def default_tree(schema, core):
|
|
2482
|
+
leaf_schema = core._find_parameter(
|
|
2483
|
+
schema,
|
|
2484
|
+
'leaf')
|
|
2485
|
+
|
|
2486
|
+
default = {}
|
|
2487
|
+
|
|
2488
|
+
non_schema_keys = [
|
|
2489
|
+
key
|
|
2490
|
+
for key in schema
|
|
2491
|
+
if not is_schema_key(key)]
|
|
2492
|
+
|
|
2493
|
+
if non_schema_keys:
|
|
2494
|
+
base_schema = {
|
|
2495
|
+
key: subschema
|
|
2496
|
+
for key, subschema in schema.items()
|
|
2497
|
+
if is_schema_key(key)}
|
|
2498
|
+
|
|
2499
|
+
for key in non_schema_keys:
|
|
2500
|
+
subschema = core.merge_schemas(
|
|
2501
|
+
base_schema,
|
|
2502
|
+
schema[key])
|
|
2503
|
+
|
|
2504
|
+
subdefault = core.default(
|
|
2505
|
+
subschema)
|
|
2506
|
+
|
|
2507
|
+
if subdefault:
|
|
2508
|
+
default[key] = subdefault
|
|
2509
|
+
|
|
2510
|
+
return default
|
|
2511
|
+
|
|
2512
|
+
|
|
2513
|
+
def default_array(schema, core):
|
|
2514
|
+
data_schema = core._find_parameter(
|
|
2515
|
+
schema,
|
|
2516
|
+
'data')
|
|
2517
|
+
|
|
2518
|
+
dtype = read_datatype(
|
|
2519
|
+
data_schema)
|
|
2520
|
+
|
|
2521
|
+
shape = read_shape(
|
|
2522
|
+
schema['_shape'])
|
|
2523
|
+
|
|
2524
|
+
return np.zeros(
|
|
2525
|
+
shape,
|
|
2526
|
+
dtype=dtype)
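# --- illustrative sketch (not part of the module) ---
# default_array reads the dtype from the 'data' parameter and the shape from
# '_shape', then allocates a zeroed array. The same allocation directly, with
# made-up values for what read_datatype/read_shape would return:
import numpy as np
example_shape = (3, 4)        # what read_shape(schema['_shape']) might produce
example_dtype = np.float64    # what read_datatype(data_schema) might produce
default = np.zeros(example_shape, dtype=example_dtype)
assert default.shape == (3, 4) and default.dtype == np.float64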
|
|
2527
|
+
|
|
2528
|
+
|
|
2529
|
+
def default_enum(schema, core):
|
|
2530
|
+
parameter = schema['_type_parameters'][0]
|
|
2531
|
+
return schema[f'_{parameter}']
|
|
2532
|
+
|
|
2533
|
+
|
|
2534
|
+
def default_edge(schema, core):
|
|
2535
|
+
edge = {}
|
|
2536
|
+
for key in schema:
|
|
2537
|
+
if not is_schema_key(key):
|
|
2538
|
+
edge[key] = core.default(
|
|
2539
|
+
schema[key])
|
|
2540
|
+
|
|
2541
|
+
return edge
|
|
2542
|
+
|
|
2543
|
+
|
|
2544
|
+
# ============================
|
|
2545
|
+
# Generate Functions Overview
|
|
2546
|
+
# ============================
|
|
2547
|
+
# These functions are responsible for generating schemas and states based on
|
|
2548
|
+
# the provided schema and state.
|
|
2549
|
+
#
|
|
2550
|
+
# Each function handles a specific type of schema and ensures that the
|
|
2551
|
+
# generation is done correctly.
|
|
2552
|
+
|
|
2553
|
+
|
|
2554
|
+
def generate_any(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2555
|
+
schema = schema or {}
|
|
2556
|
+
if is_empty(state):
|
|
2557
|
+
state = core.default(schema)
|
|
2558
|
+
top_schema = top_schema or schema
|
|
2559
|
+
top_state = top_state or state
|
|
2560
|
+
path = path or []
|
|
2561
|
+
|
|
2562
|
+
generated_schema = {}
|
|
2563
|
+
generated_state = {}
|
|
2564
|
+
|
|
2565
|
+
if isinstance(state, dict):
|
|
2566
|
+
visited = set([])
|
|
2567
|
+
|
|
2568
|
+
all_keys = union_keys(
|
|
2569
|
+
schema,
|
|
2570
|
+
state)
|
|
2571
|
+
|
|
2572
|
+
non_schema_keys = [
|
|
2573
|
+
key
|
|
2574
|
+
for key in all_keys
|
|
2575
|
+
if not is_schema_key(key)]
|
|
2576
|
+
|
|
2577
|
+
for key in all_keys:
|
|
2578
|
+
if is_schema_key(key):
|
|
2579
|
+
generated_schema[key] = state.get(
|
|
2580
|
+
key,
|
|
2581
|
+
schema.get(key))
|
|
2582
|
+
|
|
2583
|
+
else:
|
|
2584
|
+
subschema, substate, top_schema, top_state = core._generate_recur(
|
|
2585
|
+
schema.get(key),
|
|
2586
|
+
state.get(key),
|
|
2587
|
+
top_schema=top_schema,
|
|
2588
|
+
top_state=top_state,
|
|
2589
|
+
path=path+[key])
|
|
2590
|
+
|
|
2591
|
+
generated_schema[key] = core.resolve_schemas(
|
|
2592
|
+
schema.get(key, {}),
|
|
2593
|
+
subschema)
|
|
2594
|
+
|
|
2595
|
+
generated_state[key] = substate
|
|
2596
|
+
|
|
2597
|
+
else:
|
|
2598
|
+
if not core.check(schema, state):
|
|
2599
|
+
deserialized_state = core.deserialize(schema, state)
|
|
2600
|
+
if core.check(schema, deserialized_state):
|
|
2601
|
+
state = deserialized_state
|
|
2602
|
+
else:
|
|
2603
|
+
raise Exception(f'cannot generate {state} as {schema}')
|
|
2604
|
+
generated_schema, generated_state = schema, state
|
|
2605
|
+
|
|
2606
|
+
if path:
|
|
2607
|
+
top_schema, top_state = core.set_slice(
|
|
2608
|
+
top_schema,
|
|
2609
|
+
top_state,
|
|
2610
|
+
path,
|
|
2611
|
+
generated_schema,
|
|
2612
|
+
generated_state)
|
|
2613
|
+
else:
|
|
2614
|
+
top_state = core.merge_recur(
|
|
2615
|
+
top_schema,
|
|
2616
|
+
top_state,
|
|
2617
|
+
generated_state)
|
|
2618
|
+
|
|
2619
|
+
return generated_schema, generated_state, top_schema, top_state
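# --- illustrative sketch (not part of the module) ---
# The generate step walks schema and state together, filling in defaults for
# keys the state does not mention. This is a much-simplified stand-in that
# only handles nested dicts with literal '_default' values; the real
# generate_any also resolves schemas and writes results back into top_state.
def generate_plain(schema, state):
    if not isinstance(schema, dict) or '_default' in schema:
        return state if state is not None else (
            schema.get('_default') if isinstance(schema, dict) else None)
    state = state or {}
    return {
        key: generate_plain(subschema, state.get(key))
        for key, subschema in schema.items()
        if not key.startswith('_')}
example_schema = {
    'volume': {'_default': 1.0},
    'genes': {'copies': {'_default': 0}}}
assert generate_plain(example_schema, {'volume': 2.5}) == {
    'volume': 2.5, 'genes': {'copies': 0}}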
|
|
2620
|
+
|
|
2621
|
+
|
|
2622
|
+
def generate_list(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2623
|
+
schema = schema or {}
|
|
2624
|
+
state = state or core.default(schema)
|
|
2625
|
+
top_schema = top_schema or schema
|
|
2626
|
+
top_state = top_state or state
|
|
2627
|
+
path = path or []
|
|
2628
|
+
|
|
2629
|
+
element_type = core._find_parameter(
|
|
2630
|
+
schema,
|
|
2631
|
+
'element')
|
|
2632
|
+
|
|
2633
|
+
generated_state = []
|
|
2634
|
+
|
|
2635
|
+
for index, element in enumerate(state):
|
|
2636
|
+
subschema, substate, top_schema, top_state = core._generate_recur(
|
|
2637
|
+
element_type,
|
|
2638
|
+
element,
|
|
2639
|
+
top_schema=top_schema,
|
|
2640
|
+
top_state=top_state,
|
|
2641
|
+
path=path + [index])
|
|
2642
|
+
|
|
2643
|
+
generated_state.append(substate)
|
|
2644
|
+
|
|
2645
|
+
return schema, generated_state, top_schema, top_state
|
|
2646
|
+
|
|
2647
|
+
|
|
2648
|
+
def generate_quote(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2649
|
+
return schema, state, top_schema, top_state
|
|
2650
|
+
|
|
2651
|
+
|
|
2652
|
+
def default_quote(schema, core):
|
|
2653
|
+
if '_default' in schema:
|
|
2654
|
+
return copy.deepcopy(schema['_default'])
|
|
2655
|
+
else:
|
|
2656
|
+
return None
|
|
2657
|
+
|
|
2658
|
+
|
|
2659
|
+
def generate_map(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2660
|
+
schema = schema or {}
|
|
2661
|
+
state = state or core.default(schema)
|
|
2662
|
+
top_schema = top_schema or schema
|
|
2663
|
+
top_state = top_state or state
|
|
2664
|
+
path = path or []
|
|
2665
|
+
|
|
2666
|
+
value_type = core._find_parameter(
|
|
2667
|
+
schema,
|
|
2668
|
+
'value')
|
|
2669
|
+
|
|
2670
|
+
# TODO: can we assume this was already sorted at the top level?
|
|
2671
|
+
generated_schema, generated_state = core._sort(
|
|
2672
|
+
schema,
|
|
2673
|
+
state)
|
|
2674
|
+
|
|
2675
|
+
try:
|
|
2676
|
+
all_keys = union_keys(schema, state) # set(schema.keys()).union(state.keys())
|
|
2677
|
+
except Exception as e:
|
|
2678
|
+
# provide the path at which the error occurred
|
|
2679
|
+
raise Exception(
|
|
2680
|
+
f"Error at path {path}:\n"
|
|
2681
|
+
f"Expected schema: {core.representation(schema)}\n"
|
|
2682
|
+
f"Provided state: {state}") from e
|
|
2683
|
+
|
|
2684
|
+
for key in all_keys:
|
|
2685
|
+
if is_schema_key(key):
|
|
2686
|
+
generated_schema[key] = state.get(
|
|
2687
|
+
key,
|
|
2688
|
+
schema.get(key))
|
|
2689
|
+
|
|
2690
|
+
else:
|
|
2691
|
+
subschema = schema.get(key, value_type)
|
|
2692
|
+
substate = state.get(key)
|
|
2693
|
+
|
|
2694
|
+
subschema = core.merge_schemas(
|
|
2695
|
+
value_type,
|
|
2696
|
+
subschema)
|
|
2697
|
+
|
|
2698
|
+
subschema, generated_state[key], top_schema, top_state = core._generate_recur(
|
|
2699
|
+
subschema,
|
|
2700
|
+
substate,
|
|
2701
|
+
top_schema=top_schema,
|
|
2702
|
+
top_state=top_state,
|
|
2703
|
+
path=path + [key])
|
|
2704
|
+
|
|
2705
|
+
return generated_schema, generated_state, top_schema, top_state
|
|
2706
|
+
|
|
2707
|
+
|
|
2708
|
+
def generate_tree(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2709
|
+
schema = schema or {}
|
|
2710
|
+
state = state or core.default(schema)
|
|
2711
|
+
top_schema = top_schema or schema
|
|
2712
|
+
top_state = top_state or state
|
|
2713
|
+
path = path or []
|
|
2714
|
+
|
|
2715
|
+
leaf_type = core._find_parameter(
|
|
2716
|
+
schema,
|
|
2717
|
+
'leaf')
|
|
2718
|
+
|
|
2719
|
+
leaf_is_any = leaf_type == 'any' or (isinstance(leaf_type, dict) and leaf_type.get('_type') == 'any')
|
|
2720
|
+
|
|
2721
|
+
if not leaf_is_any and core.check(leaf_type, state):
|
|
2722
|
+
generate_schema, generate_state, top_schema, top_state = core._generate_recur(
|
|
2723
|
+
leaf_type,
|
|
2724
|
+
state,
|
|
2725
|
+
top_schema=top_schema,
|
|
2726
|
+
top_state=top_state,
|
|
2727
|
+
path=path)
|
|
2728
|
+
|
|
2729
|
+
elif isinstance(state, dict):
|
|
2730
|
+
generate_schema = {}
|
|
2731
|
+
generate_state = {}
|
|
2732
|
+
|
|
2733
|
+
all_keys = union_keys(schema, state) # set(schema.keys()).union(state.keys())
|
|
2734
|
+
non_schema_keys = [
|
|
2735
|
+
key
|
|
2736
|
+
for key in all_keys
|
|
2737
|
+
if not is_schema_key(key)]
|
|
2738
|
+
|
|
2739
|
+
if non_schema_keys:
|
|
2740
|
+
base_schema = {
|
|
2741
|
+
key: subschema
|
|
2742
|
+
for key, subschema in schema.items()
|
|
2743
|
+
if is_schema_key(key)}
|
|
2744
|
+
else:
|
|
2745
|
+
base_schema = schema
|
|
2746
|
+
|
|
2747
|
+
for key in all_keys:
|
|
2748
|
+
if not is_schema_key(key):
|
|
2749
|
+
subschema = schema.get(key)
|
|
2750
|
+
substate = state.get(key)
|
|
2751
|
+
|
|
2752
|
+
if not substate or core.check(leaf_type, substate):
|
|
2753
|
+
base_schema = leaf_type
|
|
2754
|
+
|
|
2755
|
+
subschema = core.merge_schemas(
|
|
2756
|
+
base_schema,
|
|
2757
|
+
subschema)
|
|
2758
|
+
|
|
2759
|
+
subschema, generate_state[key], top_schema, top_state = core._generate_recur(
|
|
2760
|
+
subschema,
|
|
2761
|
+
substate,
|
|
2762
|
+
top_schema=top_schema,
|
|
2763
|
+
top_state=top_state,
|
|
2764
|
+
path=path + [key])
|
|
2765
|
+
|
|
2766
|
+
elif key in state:
|
|
2767
|
+
generate_schema[key] = state[key]
|
|
2768
|
+
elif key in schema:
|
|
2769
|
+
generate_schema[key] = schema[key]
|
|
2770
|
+
else:
|
|
2771
|
+
raise Exception('the impossible has occurred, now is the time'
|
|
2772
|
+
' for celebration')
|
|
2773
|
+
else:
|
|
2774
|
+
generate_schema, generate_state, top_schema, top_state = core._generate_recur(
|
|
2775
|
+
leaf_type,
|
|
2776
|
+
state,
|
|
2777
|
+
top_schema=top_schema,
|
|
2778
|
+
top_state=top_state,
|
|
2779
|
+
path=path)
|
|
2780
|
+
|
|
2781
|
+
return generate_schema, generate_state, top_schema, top_state
|
|
2782
|
+
|
|
2783
|
+
|
|
2784
|
+
def generate_ports(core, schema, wires, top_schema=None, top_state=None, path=None):
|
|
2785
|
+
schema = schema or {}
|
|
2786
|
+
wires = wires or {}
|
|
2787
|
+
top_schema = top_schema or schema
|
|
2788
|
+
top_state = top_state or {}
|
|
2789
|
+
path = path or []
|
|
2790
|
+
|
|
2791
|
+
if isinstance(schema, str):
|
|
2792
|
+
schema = {'_type': schema}
|
|
2793
|
+
|
|
2794
|
+
for port_key, subwires in wires.items():
|
|
2795
|
+
if port_key in schema:
|
|
2796
|
+
port_schema = schema[port_key]
|
|
2797
|
+
else:
|
|
2798
|
+
port_schema, subwires = core.slice(
|
|
2799
|
+
schema,
|
|
2800
|
+
wires,
|
|
2801
|
+
port_key)
|
|
2802
|
+
|
|
2803
|
+
if isinstance(subwires, dict):
|
|
2804
|
+
top_schema, top_state = generate_ports(
|
|
2805
|
+
core,
|
|
2806
|
+
port_schema,
|
|
2807
|
+
subwires,
|
|
2808
|
+
top_schema=top_schema,
|
|
2809
|
+
top_state=top_state,
|
|
2810
|
+
path=path)
|
|
2811
|
+
|
|
2812
|
+
else:
|
|
2813
|
+
if isinstance(subwires, str):
|
|
2814
|
+
subwires = [subwires]
|
|
2815
|
+
|
|
2816
|
+
default_state = core.default(
|
|
2817
|
+
port_schema)
|
|
2818
|
+
|
|
2819
|
+
top_schema, top_state = core.set_slice(
|
|
2820
|
+
top_schema,
|
|
2821
|
+
top_state,
|
|
2822
|
+
path[:-1] + subwires,
|
|
2823
|
+
port_schema,
|
|
2824
|
+
default_state,
|
|
2825
|
+
defer=True)
|
|
2826
|
+
|
|
2827
|
+
return top_schema, top_state
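# --- illustrative sketch (not part of the module) ---
# generate_ports follows each wire (a path relative to the edge's parent) and
# makes sure a default value exists at that location in the top-level state.
# Here is the path-placement idea in isolation, with a tiny set_path helper
# standing in for core.set_slice:
def place_at_path(tree, path, value):
    node = tree
    for step in path[:-1]:
        node = node.setdefault(step, {})
    node[path[-1]] = value
    return tree
example_top_state = {}
example_wires = {'volume': ['cell', 'volume']}
example_defaults = {'volume': 0.0}
for port, wire_path in example_wires.items():
    place_at_path(example_top_state, wire_path, example_defaults[port])
assert example_top_state == {'cell': {'volume': 0.0}}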
|
|
2828
|
+
|
|
2829
|
+
|
|
2830
|
+
def generate_edge(core, schema, state, top_schema=None, top_state=None, path=None):
|
|
2831
|
+
schema = schema or {}
|
|
2832
|
+
state = state or {}
|
|
2833
|
+
top_schema = top_schema or schema
|
|
2834
|
+
top_state = top_state or state
|
|
2835
|
+
path = path or []
|
|
2836
|
+
|
|
2837
|
+
generated_schema, generated_state, top_schema, top_state = generate_any(
|
|
2838
|
+
core,
|
|
2839
|
+
schema,
|
|
2840
|
+
state,
|
|
2841
|
+
top_schema=top_schema,
|
|
2842
|
+
top_state=top_state,
|
|
2843
|
+
path=path)
|
|
2844
|
+
|
|
2845
|
+
deserialized_state = core.deserialize(
|
|
2846
|
+
generated_schema,
|
|
2847
|
+
generated_state)
|
|
2848
|
+
|
|
2849
|
+
merged_schema, merged_state = core._sort(
|
|
2850
|
+
generated_schema,
|
|
2851
|
+
deserialized_state)
|
|
2852
|
+
|
|
2853
|
+
top_schema, top_state = core.set_slice(
|
|
2854
|
+
top_schema,
|
|
2855
|
+
top_state,
|
|
2856
|
+
path,
|
|
2857
|
+
merged_schema,
|
|
2858
|
+
merged_state)
|
|
2859
|
+
|
|
2860
|
+
for port_key in ['inputs', 'outputs']:
|
|
2861
|
+
port_schema = merged_schema.get(
|
|
2862
|
+
f'_{port_key}', {})
|
|
2863
|
+
ports = merged_state.get(
|
|
2864
|
+
port_key, {})
|
|
2865
|
+
|
|
2866
|
+
top_schema, top_state = generate_ports(
|
|
2867
|
+
core,
|
|
2868
|
+
port_schema,
|
|
2869
|
+
ports,
|
|
2870
|
+
top_schema=top_schema,
|
|
2871
|
+
top_state=top_state,
|
|
2872
|
+
path=path)
|
|
2873
|
+
|
|
2874
|
+
return merged_schema, merged_state, top_schema, top_state
|
|
2875
|
+
|
|
2876
|
+
|
|
2877
|
+
# =========================
|
|
2878
|
+
# Sort Functions Overview
|
|
2879
|
+
# =========================
|
|
2880
|
+
# These functions are responsible for sorting schemas and states.
|
|
2881
|
+
#
|
|
2882
|
+
# Each function handles a specific type of schema and ensures that the
|
|
2883
|
+
# sorting is done correctly.
|
|
2884
|
+
|
|
2885
|
+
|
|
2886
|
+
def sort_any(core, schema, state):
|
|
2887
|
+
if not isinstance(schema, dict):
|
|
2888
|
+
schema = core.find(schema)
|
|
2889
|
+
if not isinstance(state, dict):
|
|
2890
|
+
return schema, state
|
|
2891
|
+
|
|
2892
|
+
merged_schema = {}
|
|
2893
|
+
merged_state = {}
|
|
2894
|
+
|
|
2895
|
+
for key in union_keys(schema, state):
|
|
2896
|
+
if is_schema_key(key):
|
|
2897
|
+
if key in state:
|
|
2898
|
+
merged_schema[key] = core.merge_schemas(
|
|
2899
|
+
schema.get(key, {}),
|
|
2900
|
+
state[key])
|
|
2901
|
+
else:
|
|
2902
|
+
merged_schema[key] = schema[key]
|
|
2903
|
+
else:
|
|
2904
|
+
subschema, merged_state[key] = core._sort(
|
|
2905
|
+
schema.get(key, {}),
|
|
2906
|
+
state.get(key, None))
|
|
2907
|
+
if subschema:
|
|
2908
|
+
merged_schema[key] = subschema
|
|
2909
|
+
|
|
2910
|
+
return merged_schema, merged_state
|
|
2911
|
+
|
|
2912
|
+
|
|
2913
|
+
def sort_quote(core, schema, state):
|
|
2914
|
+
return schema, state
|
|
2915
|
+
|
|
2916
|
+
|
|
2917
|
+
def sort_map(core, schema, state):
|
|
2918
|
+
if not isinstance(schema, dict):
|
|
2919
|
+
schema = core.find(schema)
|
|
2920
|
+
if not isinstance(state, dict):
|
|
2921
|
+
return schema, state
|
|
2922
|
+
|
|
2923
|
+
merged_schema = {}
|
|
2924
|
+
merged_state = {}
|
|
2925
|
+
|
|
2926
|
+
value_schema = core._find_parameter(
|
|
2927
|
+
schema,
|
|
2928
|
+
'value')
|
|
2929
|
+
|
|
2930
|
+
for key in union_keys(schema, state):
|
|
2931
|
+
if is_schema_key(key):
|
|
2932
|
+
if key in state:
|
|
2933
|
+
merged_schema[key] = core.merge_schemas(
|
|
2934
|
+
schema.get(key, {}),
|
|
2935
|
+
state[key])
|
|
2936
|
+
else:
|
|
2937
|
+
merged_schema[key] = schema[key]
|
|
2938
|
+
else:
|
|
2939
|
+
subschema, merged_state[key] = core._sort(
|
|
2940
|
+
schema.get(key, {}),
|
|
2941
|
+
state.get(key, None))
|
|
2942
|
+
if subschema:
|
|
2943
|
+
value_schema = core.merge_schemas(
|
|
2944
|
+
value_schema,
|
|
2945
|
+
subschema)
|
|
2946
|
+
# merged_schema[key] = subschema
|
|
2947
|
+
|
|
2948
|
+
return merged_schema, merged_state
|
|
2949
|
+
|
|
2950
|
+
|
|
2951
|
+
def find_union_type(core, schema, state):
|
|
2952
|
+
parameters = core._parameters_for(schema)
|
|
2953
|
+
|
|
2954
|
+
for possible in parameters:
|
|
2955
|
+
if core.check(possible, state):
|
|
2956
|
+
return core.access(possible)
|
|
2957
|
+
return None
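# --- illustrative sketch (not part of the module) ---
# find_union_type simply returns the first parameter type whose check passes
# for the given state. The same control flow with ordinary predicates:
example_checks = [
    ('integer', lambda value: isinstance(value, int) and not isinstance(value, bool)),
    ('float', lambda value: isinstance(value, float)),
    ('string', lambda value: isinstance(value, str))]
def first_matching_type(state):
    for name, check in example_checks:
        if check(state):
            return name
    return None
assert first_matching_type(3.3) == 'float'
assert first_matching_type('hello') == 'string'
assert first_matching_type(None) is None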
|
|
2958
|
+
|
|
2959
|
+
|
|
2960
|
+
|
|
2961
|
+
|
|
2962
|
+
# ==========================
|
|
2963
|
+
# Reaction Functions Overview
|
|
2964
|
+
# ==========================
|
|
2965
|
+
# These functions are responsible for handling reactions within the schema
|
|
2966
|
+
# and state.
|
|
2967
|
+
#
|
|
2968
|
+
# Each function processes a specific type of reaction and ensures that the
|
|
2969
|
+
# state is updated accordingly.
|
|
2970
|
+
#
|
|
2971
|
+
# Function signature: (schema, state, reaction, core)
|
|
2972
|
+
|
|
2973
|
+
|
|
2974
|
+
def add_reaction(schema, state, reaction, core):
|
|
2975
|
+
path = reaction.get('path')
|
|
2976
|
+
|
|
2977
|
+
redex = {}
|
|
2978
|
+
establish_path(
|
|
2979
|
+
redex,
|
|
2980
|
+
path)
|
|
2981
|
+
|
|
2982
|
+
reactum = {}
|
|
2983
|
+
node = establish_path(
|
|
2984
|
+
reactum,
|
|
2985
|
+
path)
|
|
2986
|
+
|
|
2987
|
+
deep_merge(
|
|
2988
|
+
node,
|
|
2989
|
+
reaction.get('add', {}))
|
|
2990
|
+
|
|
2991
|
+
return {
|
|
2992
|
+
'redex': redex,
|
|
2993
|
+
'reactum': reactum}
|
|
2994
|
+
|
|
2995
|
+
|
|
2996
|
+
def remove_reaction(schema, state, reaction, core):
|
|
2997
|
+
path = reaction.get('path', ())
|
|
2998
|
+
redex = {}
|
|
2999
|
+
node = establish_path(
|
|
3000
|
+
redex,
|
|
3001
|
+
path)
|
|
3002
|
+
|
|
3003
|
+
for remove in reaction.get('remove', []):
|
|
3004
|
+
node[remove] = {}
|
|
3005
|
+
|
|
3006
|
+
reactum = {}
|
|
3007
|
+
establish_path(
|
|
3008
|
+
reactum,
|
|
3009
|
+
path)
|
|
3010
|
+
|
|
3011
|
+
return {
|
|
3012
|
+
'redex': redex,
|
|
3013
|
+
'reactum': reactum}
|
|
3014
|
+
|
|
3015
|
+
|
|
3016
|
+
def replace_reaction(schema, state, reaction, core):
|
|
3017
|
+
path = reaction.get('path', ())
|
|
3018
|
+
|
|
3019
|
+
redex = {}
|
|
3020
|
+
node = establish_path(
|
|
3021
|
+
redex,
|
|
3022
|
+
path)
|
|
3023
|
+
|
|
3024
|
+
for before_key, before_state in reaction.get('before', {}).items():
|
|
3025
|
+
node[before_key] = before_state
|
|
3026
|
+
|
|
3027
|
+
reactum = {}
|
|
3028
|
+
node = establish_path(
|
|
3029
|
+
reactum,
|
|
3030
|
+
path)
|
|
3031
|
+
|
|
3032
|
+
for after_key, after_state in reaction.get('after', {}).items():
|
|
3033
|
+
node[after_key] = after_state
|
|
3034
|
+
|
|
3035
|
+
return {
|
|
3036
|
+
'redex': redex,
|
|
3037
|
+
'reactum': reactum}
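# --- illustrative sketch (not part of the module) ---
# Each reaction function returns a redex (the pattern to match and remove) and
# a reactum (what to put in its place). Assuming establish_path builds nested
# dicts along the given path, a 'replace' reaction at path ['cell'] that swaps
# one substate for another would yield a pair shaped like this (the phase
# values are made up for the example):
example_reaction = {
    'path': ['cell'],
    'before': {'phase': 'interphase'},
    'after': {'phase': 'mitosis'}}
expected_result = {
    'redex': {'cell': {'phase': 'interphase'}},
    'reactum': {'cell': {'phase': 'mitosis'}}}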
|
|
3038
|
+
|
|
3039
|
+
|
|
3040
|
+
def register_base_reactions(core):
|
|
3041
|
+
core.register_reaction('add', add_reaction)
|
|
3042
|
+
core.register_reaction('remove', remove_reaction)
|
|
3043
|
+
core.register_reaction('replace', replace_reaction)
|
|
3044
|
+
core.register_reaction('divide', divide_reaction)
|
|
3045
|
+
|
|
3046
|
+
|
|
3047
|
+
# ===============================
|
|
3048
|
+
# Types with their type functions
|
|
3049
|
+
# ===============================
|
|
3050
|
+
#
|
|
3051
|
+
# These dictionaries define the types and their corresponding type functions.
|
|
3052
|
+
|
|
3053
|
+
|
|
3054
|
+
def add_units_to_library(units, type_library):
|
|
3055
|
+
for unit_name in units._units:
|
|
3056
|
+
try:
|
|
3057
|
+
unit = getattr(units, unit_name)
|
|
3058
|
+
except:
|
|
3059
|
+
# print(f'no unit named {unit_name}')
|
|
3060
|
+
continue
|
|
3061
|
+
|
|
3062
|
+
dimensionality = unit.dimensionality
|
|
3063
|
+
type_key = render_units_type(dimensionality)
|
|
3064
|
+
if not type_library.get(type_key):
|
|
3065
|
+
type_library[type_key] = {
|
|
3066
|
+
'_default': '',
|
|
3067
|
+
'_apply': apply_units,
|
|
3068
|
+
'_check': check_units,
|
|
3069
|
+
'_serialize': serialize_units,
|
|
3070
|
+
'_deserialize': deserialize_units,
|
|
3071
|
+
'_description': 'type to represent values with scientific units'}
|
|
3072
|
+
|
|
3073
|
+
return type_library
|
|
3074
|
+
|
|
3075
|
+
unit_types = {}
|
|
3076
|
+
unit_types = add_units_to_library(units, unit_types)
|
|
3077
|
+
|
|
3078
|
+
base_types = {
|
|
3079
|
+
'boolean': {
|
|
3080
|
+
'_type': 'boolean',
|
|
3081
|
+
'_default': False,
|
|
3082
|
+
'_check': check_boolean,
|
|
3083
|
+
'_apply': apply_boolean,
|
|
3084
|
+
'_serialize': serialize_boolean,
|
|
3085
|
+
'_deserialize': deserialize_boolean,
|
|
3086
|
+
'_dataclass': dataclass_boolean},
|
|
3087
|
+
|
|
3088
|
+
# abstract number type
|
|
3089
|
+
'number': {
|
|
3090
|
+
'_type': 'number',
|
|
3091
|
+
'_check': check_number,
|
|
3092
|
+
'_apply': accumulate,
|
|
3093
|
+
'_serialize': to_string,
|
|
3094
|
+
'_description': 'abstract base type for numbers'},
|
|
3095
|
+
|
|
3096
|
+
'integer': {
|
|
3097
|
+
'_type': 'integer',
|
|
3098
|
+
'_default': 0,
|
|
3099
|
+
# inherit _apply and _serialize from number type
|
|
3100
|
+
'_check': check_integer,
|
|
3101
|
+
'_deserialize': deserialize_integer,
|
|
3102
|
+
'_dataclass': dataclass_integer,
|
|
3103
|
+
'_description': '64-bit integer',
|
|
3104
|
+
'_inherit': 'number'},
|
|
3105
|
+
|
|
3106
|
+
'float': {
|
|
3107
|
+
'_type': 'float',
|
|
3108
|
+
'_default': 0.0,
|
|
3109
|
+
'_check': check_float,
|
|
3110
|
+
'_deserialize': deserialize_float,
|
|
3111
|
+
'_divide': divide_float,
|
|
3112
|
+
'_dataclass': dataclass_float,
|
|
3113
|
+
'_description': '64-bit floating point precision number',
|
|
3114
|
+
'_inherit': 'number'},
|
|
3115
|
+
|
|
3116
|
+
'string': {
|
|
3117
|
+
'_type': 'string',
|
|
3118
|
+
'_default': '',
|
|
3119
|
+
'_check': check_string,
|
|
3120
|
+
'_apply': replace,
|
|
3121
|
+
'_serialize': serialize_string,
|
|
3122
|
+
'_deserialize': deserialize_string,
|
|
3123
|
+
'_dataclass': dataclass_string,
|
|
3124
|
+
'_description': 'string of unicode characters'},
|
|
3125
|
+
|
|
3126
|
+
'enum': {
|
|
3127
|
+
'_type': 'enum',
|
|
3128
|
+
'_default': default_enum,
|
|
3129
|
+
'_apply': apply_enum,
|
|
3130
|
+
'_check': check_enum,
|
|
3131
|
+
'_serialize': serialize_string,
|
|
3132
|
+
'_deserialize': deserialize_string,
|
|
3133
|
+
'_dataclass': dataclass_string,
|
|
3134
|
+
'_description': 'enumeration type for a selection of key values'},
|
|
3135
|
+
|
|
3136
|
+
'list': {
|
|
3137
|
+
'_type': 'list',
|
|
3138
|
+
'_default': [],
|
|
3139
|
+
# '_generate': generate_list,
|
|
3140
|
+
'_check': check_list,
|
|
3141
|
+
'_slice': slice_list,
|
|
3142
|
+
'_apply': apply_list,
|
|
3143
|
+
'_serialize': serialize_list,
|
|
3144
|
+
'_deserialize': deserialize_list,
|
|
3145
|
+
'_dataclass': dataclass_list,
|
|
3146
|
+
'_fold': fold_list,
|
|
3147
|
+
'_divide': divide_list,
|
|
3148
|
+
'_type_parameters': ['element'],
|
|
3149
|
+
'_description': 'general list type (or sublists)'},
|
|
3150
|
+
|
|
3151
|
+
'map': {
|
|
3152
|
+
'_type': 'map',
|
|
3153
|
+
'_default': {},
|
|
3154
|
+
'_generate': generate_map,
|
|
3155
|
+
'_apply': apply_map,
|
|
3156
|
+
'_serialize': serialize_map,
|
|
3157
|
+
'_deserialize': deserialize_map,
|
|
3158
|
+
'_resolve': resolve_map,
|
|
3159
|
+
'_dataclass': dataclass_map,
|
|
3160
|
+
'_check': check_map,
|
|
3161
|
+
'_slice': slice_map,
|
|
3162
|
+
'_fold': fold_map,
|
|
3163
|
+
'_divide': divide_map,
|
|
3164
|
+
'_sort': sort_map,
|
|
3165
|
+
'_type_parameters': ['value'],
|
|
3166
|
+
'_description': 'flat mapping from keys of strings to '
|
|
3167
|
+
'values of any type'},
|
|
3168
|
+
|
|
3169
|
+
'tree': {
|
|
3170
|
+
'_type': 'tree',
|
|
3171
|
+
'_default': default_tree,
|
|
3172
|
+
'_generate': generate_tree,
|
|
3173
|
+
'_check': check_tree,
|
|
3174
|
+
'_slice': slice_tree,
|
|
3175
|
+
'_apply': apply_tree,
|
|
3176
|
+
'_serialize': serialize_tree,
|
|
3177
|
+
'_deserialize': deserialize_tree,
|
|
3178
|
+
'_dataclass': dataclass_tree,
|
|
3179
|
+
'_fold': fold_tree,
|
|
3180
|
+
'_divide': divide_tree,
|
|
3181
|
+
'_resolve': resolve_tree,
|
|
3182
|
+
'_type_parameters': ['leaf'],
|
|
3183
|
+
'_description': 'mapping from str to some type in a potentially '
|
|
3184
|
+
'nested form'},
|
|
3185
|
+
|
|
3186
|
+
'array': {
|
|
3187
|
+
'_type': 'array',
|
|
3188
|
+
'_default': default_array,
|
|
3189
|
+
'_check': check_array,
|
|
3190
|
+
'_slice': slice_array,
|
|
3191
|
+
'_apply': apply_array,
|
|
3192
|
+
'_serialize': serialize_array,
|
|
3193
|
+
'_deserialize': deserialize_array,
|
|
3194
|
+
'_dataclass': dataclass_array,
|
|
3195
|
+
'_resolve': resolve_array,
|
|
3196
|
+
'_bind': bind_array,
|
|
3197
|
+
'_type_parameters': [
|
|
3198
|
+
'shape',
|
|
3199
|
+
'data'],
|
|
3200
|
+
'_description': 'an array of arbitrary dimension'},
|
|
3201
|
+
|
|
3202
|
+
'maybe': {
|
|
3203
|
+
'_type': 'maybe',
|
|
3204
|
+
'_default': None,
|
|
3205
|
+
'_apply': apply_maybe,
|
|
3206
|
+
'_check': check_maybe,
|
|
3207
|
+
'_slice': slice_maybe,
|
|
3208
|
+
'_serialize': serialize_maybe,
|
|
3209
|
+
'_deserialize': deserialize_maybe,
|
|
3210
|
+
'_dataclass': dataclass_maybe,
|
|
3211
|
+
'_resolve': resolve_maybe,
|
|
3212
|
+
'_fold': fold_maybe,
|
|
3213
|
+
'_type_parameters': ['value'],
|
|
3214
|
+
'_description': 'type to represent values that could be empty'},
|
|
3215
|
+
|
|
3216
|
+
'function': {
|
|
3217
|
+
'_type': 'function',
|
|
3218
|
+
'_apply': apply_function,
|
|
3219
|
+
'_serialize': serialize_function,
|
|
3220
|
+
'_deserialize': deserialize_function,
|
|
3221
|
+
'_check': check_function},
|
|
3222
|
+
|
|
3223
|
+
'method': {
|
|
3224
|
+
'_type': 'method',
|
|
3225
|
+
'_apply': apply_meta,
|
|
3226
|
+
'_serialize': serialize_function,
|
|
3227
|
+
'_deserialize': deserialize_function,
|
|
3228
|
+
'_check': check_method},
|
|
3229
|
+
|
|
3230
|
+
'meta': {
|
|
3231
|
+
'_inherit': 'function',
|
|
3232
|
+
'_apply': apply_meta,
|
|
3233
|
+
'_check': check_meta},
|
|
3234
|
+
|
|
3235
|
+
'mark': {
|
|
3236
|
+
'_type': 'mark',
|
|
3237
|
+
# '_inherit': ['string', 'integer'],
|
|
3238
|
+
'_apply': apply_mark,
|
|
3239
|
+
'_check': check_mark,
|
|
3240
|
+
'_deserialize': deserialize_mark,
|
|
3241
|
+
'_resolve': resolve_mark},
|
|
3242
|
+
|
|
3243
|
+
'path': {
|
|
3244
|
+
'_type': 'path',
|
|
3245
|
+
'_inherit': 'list[mark]',
|
|
3246
|
+
'_apply': apply_path},
|
|
3247
|
+
|
|
3248
|
+
'wires': {
|
|
3249
|
+
'_type': 'wires',
|
|
3250
|
+
'_inherit': 'tree[path]'},
|
|
3251
|
+
|
|
3252
|
+
'schema': {
|
|
3253
|
+
'_type': 'schema',
|
|
3254
|
+
'_inherit': 'tree[any]',
|
|
3255
|
+
'_apply': apply_schema,
|
|
3256
|
+
'_serialize': serialize_schema,
|
|
3257
|
+
'_deserialize': deserialize_schema},
|
|
3258
|
+
|
|
3259
|
+
'edge': {
|
|
3260
|
+
'_type': 'edge',
|
|
3261
|
+
'_default': default_edge,
|
|
3262
|
+
'_generate': generate_edge,
|
|
3263
|
+
'_apply': apply_edge,
|
|
3264
|
+
'_serialize': serialize_edge,
|
|
3265
|
+
'_deserialize': deserialize_edge,
|
|
3266
|
+
'_dataclass': dataclass_edge,
|
|
3267
|
+
'_check': check_edge,
|
|
3268
|
+
'_slice': slice_edge,
|
|
3269
|
+
# '_merge': merge_edge,
|
|
3270
|
+
'_type_parameters': ['inputs', 'outputs'],
|
|
3271
|
+
'_description': 'hyperedges in the bigraph, with inputs and outputs '
|
|
3272
|
+
'as type parameters',
|
|
3273
|
+
'inputs': 'wires',
|
|
3274
|
+
'outputs': 'wires'}}
|
|
3275
|
+
|
|
3276
|
+
registry_types = {
|
|
3277
|
+
'any': {
|
|
3278
|
+
'_type': 'any',
|
|
3279
|
+
'_default': default_any,
|
|
3280
|
+
'_slice': slice_any,
|
|
3281
|
+
'_apply': apply_any,
|
|
3282
|
+
'_check': check_any,
|
|
3283
|
+
'_sort': sort_any,
|
|
3284
|
+
'_generate': generate_any,
|
|
3285
|
+
'_serialize': serialize_any,
|
|
3286
|
+
'_deserialize': deserialize_any,
|
|
3287
|
+
'_dataclass': dataclass_any,
|
|
3288
|
+
'_resolve': resolve_any,
|
|
3289
|
+
'_fold': fold_any,
|
|
3290
|
+
'_bind': bind_any,
|
|
3291
|
+
'_divide': divide_any},
|
|
3292
|
+
|
|
3293
|
+
'quote': {
|
|
3294
|
+
'_type': 'quote',
|
|
3295
|
+
'_deserialize': deserialize_quote,
|
|
3296
|
+
'_default': default_quote,
|
|
3297
|
+
'_generate': generate_quote,
|
|
3298
|
+
'_sort': sort_quote,
|
|
3299
|
+
'_description': 'protect a schema from generation, i.e. in the config '
|
|
3300
|
+
'for a nested composite which has type information we only want to '
|
|
3301
|
+
'evaluate inside of the composite'},
|
|
3302
|
+
|
|
3303
|
+
'tuple': {
|
|
3304
|
+
'_type': 'tuple',
|
|
3305
|
+
'_default': default_tuple,
|
|
3306
|
+
'_apply': apply_tuple,
|
|
3307
|
+
'_check': check_tuple,
|
|
3308
|
+
'_slice': slice_tuple,
|
|
3309
|
+
'_serialize': serialize_tuple,
|
|
3310
|
+
'_deserialize': deserialize_tuple,
|
|
3311
|
+
'_dataclass': dataclass_tuple,
|
|
3312
|
+
'_fold': fold_tuple,
|
|
3313
|
+
'_divide': divide_tuple,
|
|
3314
|
+
'_bind': bind_tuple,
|
|
3315
|
+
'_description': 'tuple of an ordered set of typed values'},
|
|
3316
|
+
|
|
3317
|
+
'union': {
|
|
3318
|
+
'_type': 'union',
|
|
3319
|
+
'_default': default_union,
|
|
3320
|
+
'_apply': apply_union,
|
|
3321
|
+
'_check': check_union,
|
|
3322
|
+
'_slice': slice_union,
|
|
3323
|
+
'_serialize': serialize_union,
|
|
3324
|
+
'_deserialize': deserialize_union,
|
|
3325
|
+
'_dataclass': dataclass_union,
|
|
3326
|
+
'_fold': fold_union,
|
|
3327
|
+
'_resolve': resolve_union,
|
|
3328
|
+
'_description': 'union of a set of possible types'}}
|
|
3329
|
+
|
|
3330
|
+
|
|
3331
|
+
TYPE_FUNCTION_KEYS = [
|
|
3332
|
+
'_apply',
|
|
3333
|
+
'_check',
|
|
3334
|
+
'_fold',
|
|
3335
|
+
'_divide',
|
|
3336
|
+
'_react',
|
|
3337
|
+
'_serialize',
|
|
3338
|
+
'_deserialize',
|
|
3339
|
+
'_slice',
|
|
3340
|
+
'_bind',
|
|
3341
|
+
'_merge']
|
|
3342
|
+
|
|
3343
|
+
TYPE_SCHEMAS = {
|
|
3344
|
+
'float': 'float'}
|
|
3345
|
+
|
|
3346
|
+
SYMBOL_TYPES = ['enum']
|
|
3347
|
+
|
|
3348
|
+
required_schema_keys = {'_default', '_apply', '_check', '_serialize',
|
|
3349
|
+
'_deserialize', '_fold'}
|
|
3350
|
+
|
|
3351
|
+
optional_schema_keys = {'_type', '_value', '_description', '_type_parameters',
|
|
3352
|
+
'_inherit', '_divide'}
|
|
3353
|
+
|
|
3354
|
+
type_schema_keys = required_schema_keys | optional_schema_keys
|
|
3355
|
+
|
|
3356
|
+
|
|
3357
|
+
def is_method_key(key, parameters):
|
|
3358
|
+
parameter_tags = [f'_{parameter}' for parameter in parameters]
|
|
3359
|
+
return key.startswith('_') and\
|
|
3360
|
+
key not in type_schema_keys and\
|
|
3361
|
+
key not in parameter_tags
|
|
3362
|
+
|
|
3363
|
+
|
|
3364
|
+
def resolve_path(path):
|
|
3365
|
+
"""
|
|
3366
|
+
Given a path that includes '..' steps, resolve the path to a canonical form
|
|
3367
|
+
"""
|
|
3368
|
+
resolve = []
|
|
3369
|
+
|
|
3370
|
+
for step in path:
|
|
3371
|
+
if step == '..':
|
|
3372
|
+
if len(resolve) == 0:
|
|
3373
|
+
raise Exception(f'cannot go above the top in path: "{path}"')
|
|
3374
|
+
else:
|
|
3375
|
+
resolve = resolve[:-1]
|
|
3376
|
+
else:
|
|
3377
|
+
resolve.append(step)
|
|
3378
|
+
|
|
3379
|
+
return tuple(resolve)
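# --- usage example ---
# resolve_path collapses '..' steps against the steps before them and raises
# if the path would climb above the root (this calls the function defined
# directly above).
assert resolve_path(['a', 'b', '..', 'c']) == ('a', 'c')
assert resolve_path(('x', '..')) == ()
try:
    resolve_path(['..'])
except Exception as error:
    assert 'cannot go above the top' in str(error)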
|