sera-2 1.21.1__tar.gz → 1.21.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {sera_2-1.21.1 → sera_2-1.21.2}/PKG-INFO +1 -1
  2. {sera_2-1.21.1 → sera_2-1.21.2}/pyproject.toml +1 -1
  3. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_dcg.py +41 -32
  4. {sera_2-1.21.1 → sera_2-1.21.2}/sera/misc/_utils.py +2 -0
  5. {sera_2-1.21.1 → sera_2-1.21.2}/README.md +0 -0
  6. {sera_2-1.21.1 → sera_2-1.21.2}/sera/__init__.py +0 -0
  7. {sera_2-1.21.1 → sera_2-1.21.2}/sera/constants.py +0 -0
  8. {sera_2-1.21.1 → sera_2-1.21.2}/sera/exports/__init__.py +0 -0
  9. {sera_2-1.21.1 → sera_2-1.21.2}/sera/exports/schema.py +0 -0
  10. {sera_2-1.21.1 → sera_2-1.21.2}/sera/exports/test.py +0 -0
  11. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/__init__.py +0 -0
  12. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/api_helper.py +0 -0
  13. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/api_test_helper.py +0 -0
  14. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/base_orm.py +0 -0
  15. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/base_service.py +0 -0
  16. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/__init__.py +0 -0
  17. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_edge.py +0 -0
  18. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_flow.py +0 -0
  19. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_fn_signature.py +0 -0
  20. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_node.py +0 -0
  21. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_runtime.py +0 -0
  22. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_type_conversion.py +0 -0
  23. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/middlewares/__init__.py +0 -0
  24. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/middlewares/auth.py +0 -0
  25. {sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/middlewares/uscp.py +0 -0
  26. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/__init__.py +0 -0
  27. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/__main__.py +0 -0
  28. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/make_app.py +0 -0
  29. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/make_python_api.py +0 -0
  30. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/make_python_model.py +0 -0
  31. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/make_python_services.py +0 -0
  32. {sera_2-1.21.1 → sera_2-1.21.2}/sera/make/make_typescript_model.py +0 -0
  33. {sera_2-1.21.1 → sera_2-1.21.2}/sera/misc/__init__.py +0 -0
  34. {sera_2-1.21.1 → sera_2-1.21.2}/sera/misc/_formatter.py +0 -0
  35. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/__init__.py +0 -0
  36. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_class.py +0 -0
  37. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_collection.py +0 -0
  38. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_constraints.py +0 -0
  39. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_datatype.py +0 -0
  40. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_default.py +0 -0
  41. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_enum.py +0 -0
  42. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_module.py +0 -0
  43. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_multi_lingual_string.py +0 -0
  44. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_parse.py +0 -0
  45. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_property.py +0 -0
  46. {sera_2-1.21.1 → sera_2-1.21.2}/sera/models/_schema.py +0 -0
  47. {sera_2-1.21.1 → sera_2-1.21.2}/sera/typing.py +0 -0
{sera_2-1.21.1 → sera_2-1.21.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: sera-2
-Version: 1.21.1
+Version: 1.21.2
 Summary:
 Author: Binh Vu
 Author-email: bvu687@gmail.com
{sera_2-1.21.1 → sera_2-1.21.2}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sera-2"
-version = "1.21.1"
+version = "1.21.2"
 description = ""
 authors = ["Binh Vu <bvu687@gmail.com>"]
 readme = "README.md"
{sera_2-1.21.1 → sera_2-1.21.2}/sera/libs/directed_computing_graph/_dcg.py
@@ -41,6 +41,11 @@ class DirectedComputingGraph:
     ):
         self.graph = graph
         self.type_service = type_service
+        self.node2descendants: dict[str, list[DCGNode]] = {}
+
+        for u in graph.iter_nodes():
+            self.node2descendants[u.id] = graph.descendants(u.id)
+            self.node2descendants[u.id].append(u)
 
     @staticmethod
     def from_flows(
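
The new node2descendants mapping is built once in __init__: for every node it stores the nodes reachable from it, plus the node itself, so the execute paths further down can restrict work to whatever the caller's inputs can actually reach. A minimal standalone sketch of that precomputation, using a plain adjacency dict rather than the sera graph API (the helper name and toy node ids are illustrative only):

from collections import deque

def descendants_plus_self(successors: dict[str, list[str]]) -> dict[str, list[str]]:
    """For each node, list every node reachable from it, then append the node itself.

    `successors` maps a node id to the ids of its direct successors; the real class
    does the equivalent once via graph.iter_nodes()/graph.descendants() in __init__.
    """
    result: dict[str, list[str]] = {}
    for start in successors:
        seen: set[str] = set()
        queue = deque(successors[start])
        while queue:
            v = queue.popleft()
            if v not in seen:
                seen.add(v)
                queue.extend(successors.get(v, []))
        result[start] = sorted(seen)
        result[start].append(start)  # the node itself also belongs to its own entry
    return result

# toy pipeline: load -> clean -> save, plus an unrelated report node
print(descendants_plus_self({"load": ["clean"], "clean": ["save"], "save": [], "report": []}))
# {'load': ['clean', 'save', 'load'], 'clean': ['save', 'clean'], 'save': ['save'], 'report': ['report']}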
@@ -179,7 +184,7 @@ class DirectedComputingGraph:
     def execute(
         self,
         input: dict[ComputeFnId, tuple],
-        output: set[str],
+        output: Optional[set[str]] = None,
         context: Optional[
             dict[str, Callable | Any] | Callable[[], dict[str, Any]]
         ] = None,
@@ -203,6 +208,9 @@ class DirectedComputingGraph:
         else:
             context = {k: v() if callable(v) else v for k, v in context.items()}
 
+        if output is None:
+            output = set()
+
         # This is a quick reactive algorithm, we may be able to do it better.
         # The idea is when all inputs of a function is available, we can execute a function.
         # We assume that the memory is large enough to hold all the functions and their inputs
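
With output now defaulting to None and being normalized to an empty set, callers that only care about side effects no longer have to pass output=set() themselves. The helper below is a rough standalone mirror of that normalization, written so it runs on its own; only the callable/dict context branches and the new output default come from the hunks above, and the handling of a missing context is an assumption:

from typing import Any, Callable, Optional

def normalize_args(
    output: Optional[set[str]] = None,
    context: Optional[dict[str, Callable | Any] | Callable[[], dict[str, Any]]] = None,
) -> tuple[set[str], dict[str, Any]]:
    # context may be a zero-arg callable producing the dict, or a dict whose values
    # may themselves be zero-arg callables (resolved here), mirroring execute()
    if context is None:          # assumed branch, not shown in the diff
        context = {}
    elif callable(context):
        context = context()
    else:
        context = {k: v() if callable(v) else v for k, v in context.items()}
    if output is None:           # new in 1.21.2: output is optional
        output = set()
    return output, context

print(normalize_args())                                           # (set(), {})
print(normalize_args(context={"session": lambda: "a-session"}))   # (set(), {'session': 'a-session'})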
@@ -211,25 +219,25 @@
         # we execute the computing nodes
         # when it's finished, we put the outgoing edges into a stack.
         runtimes: dict[NodeId, NodeRuntime] = {}
+        for id in input.keys():
+            for u in self.node2descendants[id]:
+                if u.id in input:
+                    # user provided input should supersede the context
+                    n_provided_args = len(input[u.id])
+                    n_consumed_context = n_provided_args - len(u.required_args)
+                else:
+                    n_consumed_context = 0
 
-        for u in self.graph.iter_nodes():
-            if u.id in input:
-                # user provided input should supersede the context
-                n_provided_args = len(input[u.id])
-                n_consumed_context = n_provided_args - len(u.required_args)
-            else:
-                n_consumed_context = 0
-
-            node_context = tuple(
-                (
-                    context[name]
-                    if name in context
-                    else u.required_context_default_args[name]
+                node_context = tuple(
+                    (
+                        context[name]
+                        if name in context
+                        else u.required_context_default_args[name]
+                    )
+                    for name in u.required_context[n_consumed_context:]
                 )
-                for name in u.required_context[n_consumed_context:]
-            )
 
-            runtimes[u.id] = NodeRuntime.from_node(self.graph, u, node_context)
+                runtimes[u.id] = NodeRuntime.from_node(self.graph, u, node_context)
         stack: list[NodeId] = []
 
         for id, args in input.items():
@@ -321,24 +329,25 @@
         # when it's finished, we put the outgoing edges into a stack.
         runtimes: dict[NodeId, NodeRuntime] = {}
 
-        for u in self.graph.iter_nodes():
-            if u.id in input:
-                # user provided input should supersede the context
-                n_provided_args = len(input[u.id])
-                n_consumed_context = n_provided_args - len(u.required_args)
-            else:
-                n_consumed_context = 0
+        for id in input.keys():
+            for u in self.node2descendants[id]:
+                if u.id in input:
+                    # user provided input should supersede the context
+                    n_provided_args = len(input[u.id])
+                    n_consumed_context = n_provided_args - len(u.required_args)
+                else:
+                    n_consumed_context = 0
 
-            node_context = tuple(
-                (
-                    context[name]
-                    if name in context
-                    else u.required_context_default_args[name]
+                node_context = tuple(
+                    (
+                        context[name]
+                        if name in context
+                        else u.required_context_default_args[name]
+                    )
+                    for name in u.required_context[n_consumed_context:]
                 )
-                for name in u.required_context[n_consumed_context:]
-            )
+                runtimes[u.id] = NodeRuntime.from_node(self.graph, u, node_context)
 
-            runtimes[u.id] = NodeRuntime.from_node(self.graph, u, node_context)
         stack: list[NodeId] = []
 
         for id, args in input.items():
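
Taken together, the two hunks above change execute() and execute_async() so that runtimes are only created for nodes downstream of (or equal to) a node that actually received input, instead of for every node in the graph. A standalone sketch of that selection step, with plain dicts and made-up node ids standing in for the sera classes:

def nodes_to_initialize(
    node2descendants: dict[str, list[str]], input_ids: set[str]
) -> set[str]:
    """Every node reachable from a node that received input; each descendant list
    already contains the node itself, as built in __init__ above."""
    selected: set[str] = set()
    for node_id in input_ids:
        selected.update(node2descendants[node_id])
    return selected

# toy pipeline: load -> clean -> save, plus an unrelated report node
node2descendants = {
    "load": ["clean", "save", "load"],
    "clean": ["save", "clean"],
    "save": ["save"],
    "report": ["report"],
}
print(nodes_to_initialize(node2descendants, {"load"}))
# contains load, clean and save; report never gets a runtime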
@@ -417,3 +417,5 @@ async def replay_events(
         await dcg.execute_async(
             input={innode: (record,)}, context={"session": session}
         )
+
+    await session.commit()
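
The last hunk makes replay_events commit the session once after all events have been pushed through the graph. A hedged sketch of that calling pattern, assuming an object with an awaitable commit() (for example an SQLAlchemy AsyncSession) and placeholder names for the graph, records, and input node; the single-commit-after-the-loop placement is inferred from the dedented added line:

async def replay(dcg, records, innode, session) -> None:
    # feed each stored record back into the graph; the graph's nodes receive the
    # session through the execution context
    for record in records:
        await dcg.execute_async(
            input={innode: (record,)}, context={"session": session}
        )
    # new in 1.21.2: persist everything once after the replay loop finishes
    await session.commit()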