tracdap-runtime 0.6.4__py3-none-any.whl → 0.6.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. tracdap/rt/_exec/context.py +382 -29
  2. tracdap/rt/_exec/dev_mode.py +123 -94
  3. tracdap/rt/_exec/engine.py +120 -9
  4. tracdap/rt/_exec/functions.py +125 -20
  5. tracdap/rt/_exec/graph.py +38 -13
  6. tracdap/rt/_exec/graph_builder.py +120 -9
  7. tracdap/rt/_impl/data.py +115 -49
  8. tracdap/rt/_impl/grpc/tracdap/metadata/job_pb2.py +74 -30
  9. tracdap/rt/_impl/grpc/tracdap/metadata/job_pb2.pyi +120 -2
  10. tracdap/rt/_impl/grpc/tracdap/metadata/model_pb2.py +12 -10
  11. tracdap/rt/_impl/grpc/tracdap/metadata/model_pb2.pyi +14 -2
  12. tracdap/rt/_impl/grpc/tracdap/metadata/resource_pb2.py +29 -0
  13. tracdap/rt/_impl/grpc/tracdap/metadata/resource_pb2.pyi +16 -0
  14. tracdap/rt/_impl/models.py +8 -0
  15. tracdap/rt/_impl/static_api.py +16 -0
  16. tracdap/rt/_impl/storage.py +37 -25
  17. tracdap/rt/_impl/validation.py +76 -7
  18. tracdap/rt/_plugins/repo_git.py +1 -1
  19. tracdap/rt/_version.py +1 -1
  20. tracdap/rt/api/experimental.py +220 -0
  21. tracdap/rt/api/hook.py +4 -0
  22. tracdap/rt/api/model_api.py +48 -6
  23. tracdap/rt/config/__init__.py +2 -2
  24. tracdap/rt/config/common.py +6 -0
  25. tracdap/rt/metadata/__init__.py +25 -20
  26. tracdap/rt/metadata/job.py +54 -0
  27. tracdap/rt/metadata/model.py +18 -0
  28. tracdap/rt/metadata/resource.py +24 -0
  29. {tracdap_runtime-0.6.4.dist-info → tracdap_runtime-0.6.5.dist-info}/METADATA +3 -1
  30. {tracdap_runtime-0.6.4.dist-info → tracdap_runtime-0.6.5.dist-info}/RECORD +33 -29
  31. {tracdap_runtime-0.6.4.dist-info → tracdap_runtime-0.6.5.dist-info}/LICENSE +0 -0
  32. {tracdap_runtime-0.6.4.dist-info → tracdap_runtime-0.6.5.dist-info}/WHEEL +0 -0
  33. {tracdap_runtime-0.6.4.dist-info → tracdap_runtime-0.6.5.dist-info}/top_level.txt +0 -0
tracdap/rt/_exec/dev_mode.py

@@ -12,8 +12,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from __future__ import annotations
-
  import re
  import typing as tp
  import copy
@@ -31,12 +29,12 @@ import tracdap.rt._impl.util as _util # noqa


  DEV_MODE_JOB_CONFIG = [
- re.compile(r"job\.run(Model|Flow)\.parameters\.\w+"),
- re.compile(r"job\.run(Model|Flow)\.inputs\.\w+"),
- re.compile(r"job\.run(Model|Flow)\.outputs\.\w+"),
- re.compile(r"job\.runModel\.model"),
- re.compile(r"job\.runFlow\.flow"),
- re.compile(r"job\.runFlow\.models\.\w+")]
+ re.compile(r"job\.\w+\.parameters\.\w+"),
+ re.compile(r"job\.\w+\.inputs\.\w+"),
+ re.compile(r"job\.\w+\.outputs\.\w+"),
+ re.compile(r"job\.\w+\.models\.\w+"),
+ re.compile(r"job\.\w+\.model"),
+ re.compile(r"job\.\w+\.flow")]

  DEV_MODE_SYS_CONFIG = []

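The relaxed patterns drop the hard-coded runModel / runFlow prefixes, so dev-mode config translation now applies to any job type. A quick sketch of the effect (the pattern list is copied from the hunk above; the config paths are illustrative):

    import re

    DEV_MODE_JOB_CONFIG = [
        re.compile(r"job\.\w+\.parameters\.\w+"),
        re.compile(r"job\.\w+\.model")]

    # 0.6.4 only matched job.runModel.* / job.runFlow.* paths;
    # the \w+ segment now accepts any job type, e.g. importModel
    print(any(p.fullmatch("job.importModel.model")
              for p in DEV_MODE_JOB_CONFIG))                      # True
    print(any(p.fullmatch("job.runModel.parameters.start_date")
              for p in DEV_MODE_JOB_CONFIG))                      # True
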
@@ -56,7 +54,7 @@ class DevModeTranslator:
  sys_config.storage = _cfg.StorageConfig()

  sys_config = cls._add_integrated_repo(sys_config)
- sys_config = cls._resolve_relative_storage_root(sys_config, config_mgr)
+ sys_config = cls._process_storage(sys_config, config_mgr)

  return sys_config

@@ -72,24 +70,23 @@ class DevModeTranslator:

  cls._log.info(f"Applying dev mode config translation to job config")

- if not job_config.jobId:
+ # Under protobuf semantics a blank jobId is still an object, but its objectId will be an empty string
+ if not job_config.jobId or not job_config.jobId.objectId:
  job_config = cls._process_job_id(job_config)

  if job_config.job.jobType is None or job_config.job.jobType == _meta.JobType.JOB_TYPE_NOT_SET:
  job_config = cls._process_job_type(job_config)

  # Load and populate any models provided as a Python class or class name
- if job_config.job.jobType in [_meta.JobType.RUN_MODEL, _meta.JobType.RUN_FLOW]:
- job_config = cls._process_models(sys_config, job_config, scratch_dir, model_class)
+ job_config = cls._process_models(sys_config, job_config, scratch_dir, model_class)

  # For flows, load external flow definitions then perform auto-wiring and type inference
  if job_config.job.jobType == _meta.JobType.RUN_FLOW:
  job_config = cls._process_flow_definition(job_config, config_mgr)

- # For run (model|flow) jobs, apply processing to the parameters, inputs and outputs
- if job_config.job.jobType in [_meta.JobType.RUN_MODEL, _meta.JobType.RUN_FLOW]:
- job_config = cls._process_parameters(job_config)
- job_config = cls._process_inputs_and_outputs(sys_config, job_config)
+ # Apply processing to the parameters, inputs and outputs
+ job_config = cls._process_parameters(job_config)
+ job_config = cls._process_inputs_and_outputs(sys_config, job_config)

  return job_config

@@ -107,51 +104,60 @@ class DevModeTranslator:
  return sys_config

  @classmethod
- def _resolve_relative_storage_root(
+ def _process_storage(
  cls, sys_config: _cfg.RuntimeConfig,
  config_mgr: _cfg_p.ConfigManager):

  storage_config = copy.deepcopy(sys_config.storage)

  for bucket_key, bucket_config in storage_config.buckets.items():
+ storage_config.buckets[bucket_key] = cls._resolve_storage_location(
+ bucket_key, bucket_config, config_mgr)
+
+ for bucket_key, bucket_config in storage_config.external.items():
+ storage_config.external[bucket_key] = cls._resolve_storage_location(
+ bucket_key, bucket_config, config_mgr)

- if bucket_config.protocol != "LOCAL":
- continue
+ sys_config = copy.copy(sys_config)
+ sys_config.storage = storage_config

- if "rootPath" not in bucket_config.properties:
- continue
+ return sys_config

- root_path = pathlib.Path(bucket_config.properties["rootPath"])
+ @classmethod
+ def _resolve_storage_location(cls, bucket_key, bucket_config, config_mgr: _cfg_p.ConfigManager):

- if root_path.is_absolute():
- continue
+ if bucket_config.protocol != "LOCAL":
+ return bucket_config

- cls._log.info(f"Resolving relative path for [{bucket_key}] local storage...")
+ if "rootPath" not in bucket_config.properties:
+ return bucket_config

- sys_config_path = config_mgr.config_dir_path()
- if sys_config_path is not None:
- absolute_path = sys_config_path.joinpath(root_path).resolve()
- if absolute_path.exists():
- cls._log.info(f"Resolved [{root_path}] -> [{absolute_path}]")
- bucket_config.properties["rootPath"] = str(absolute_path)
- continue
+ root_path = pathlib.Path(bucket_config.properties["rootPath"])

- cwd = pathlib.Path.cwd()
- absolute_path = cwd.joinpath(root_path).resolve()
+ if root_path.is_absolute():
+ return bucket_config

+ cls._log.info(f"Resolving relative path for [{bucket_key}] local storage...")
+
+ sys_config_path = config_mgr.config_dir_path()
+ if sys_config_path is not None:
+ absolute_path = sys_config_path.joinpath(root_path).resolve()
  if absolute_path.exists():
  cls._log.info(f"Resolved [{root_path}] -> [{absolute_path}]")
  bucket_config.properties["rootPath"] = str(absolute_path)
- continue
+ return bucket_config

- msg = f"Failed to resolve relative storage path [{root_path}]"
- cls._log.error(msg)
- raise _ex.EConfigParse(msg)
+ cwd = pathlib.Path.cwd()
+ absolute_path = cwd.joinpath(root_path).resolve()

- sys_config = copy.copy(sys_config)
- sys_config.storage = storage_config
+ if absolute_path.exists():
+ cls._log.info(f"Resolved [{root_path}] -> [{absolute_path}]")
+ bucket_config.properties["rootPath"] = str(absolute_path)
+ return bucket_config

- return sys_config
+ msg = f"Failed to resolve relative storage path [{root_path}]"
+ cls._log.error(msg)
+ raise _ex.EConfigParse(msg)

  @classmethod
  def _add_job_resource(
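The storage refactor extracts the per-bucket logic into _resolve_storage_location and applies it to storage.external as well as storage.buckets. A condensed sketch of the lookup order it implements for relative local paths (the standalone function name here is illustrative, not the package API):

    import pathlib

    def resolve_local_root(root_path: str, config_dir: pathlib.Path) -> str:
        # Absolute paths pass through untouched
        root = pathlib.Path(root_path)
        if root.is_absolute():
            return str(root)
        # Relative paths are tried against the sys config directory first, then the CWD
        for base in (config_dir, pathlib.Path.cwd()):
            candidate = base.joinpath(root).resolve()
            if candidate.exists():
                return str(candidate)
        raise ValueError(f"Failed to resolve relative storage path [{root}]")
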
@@ -188,6 +194,12 @@ class DevModeTranslator:
  elif job_config.job.importModel is not None:
  job_type = _meta.JobType.IMPORT_MODEL

+ elif job_config.job.importData is not None:
+ job_type = _meta.JobType.IMPORT_DATA
+
+ elif job_config.job.exportData is not None:
+ job_type = _meta.JobType.EXPORT_DATA
+
  else:
  cls._log.error("Could not infer job type")
  raise _ex.EConfigParse("Could not infer job type")
@@ -202,6 +214,26 @@ class DevModeTranslator:

  return job_config

+ @classmethod
+ def _get_job_detail(cls, job_config: _cfg.JobConfig):
+
+ if job_config.job.jobType == _meta.JobType.RUN_MODEL:
+ return job_config.job.runModel
+
+ if job_config.job.jobType == _meta.JobType.RUN_FLOW:
+ return job_config.job.runFlow
+
+ if job_config.job.jobType == _meta.JobType.IMPORT_MODEL:
+ return job_config.job.importModel
+
+ if job_config.job.jobType == _meta.JobType.IMPORT_DATA:
+ return job_config.job.importData
+
+ if job_config.job.jobType == _meta.JobType.EXPORT_DATA:
+ return job_config.job.exportData
+
+ raise _ex.EConfigParse(f"Could not get job details for job type [{job_config.job.jobType}]")
+
  @classmethod
  def _process_models(
  cls,
@@ -214,41 +246,39 @@ class DevModeTranslator:
  model_loader = _models.ModelLoader(sys_config, scratch_dir)
  model_loader.create_scope("DEV_MODE_TRANSLATION")

- original_config = job_config
+ # This processing works on the assumption that job details follow a convention for addressing models
+ # Jobs requiring a single model have a field called "model"
+ # Jobs requiring multiple models have a field called "models", which is a dict

- job_config = copy.copy(job_config)
- job_config.job = copy.copy(job_config.job)
- job_config.resources = copy.copy(job_config.resources)
+ job_detail = cls._get_job_detail(job_config)

- if job_config.job.jobType == _meta.JobType.RUN_MODEL:
+ # If a model class is supplied in code, use that to generate the model def
+ if model_class is not None:

- job_config.job.runModel = copy.copy(job_config.job.runModel)
+ # Passing a model class via launch_model() is only supported for job types with a single model
+ if not hasattr(job_detail, "model"):
+ raise _ex.EJobValidation(f"Job type [{job_config.job.jobType}] cannot be launched using launch_model()")

- # If a model class is supplied in code, use that to generate the model def
- if model_class is not None:
- model_id, model_obj = cls._generate_model_for_class(model_loader, model_class)
- job_config = cls._add_job_resource(job_config, model_id, model_obj)
- job_config.job.runModel.model = _util.selector_for(model_id)
-
- # Otherwise if model specified as a string instead of a selector, apply the translation
- elif isinstance(original_config.job.runModel.model, str):
- model_detail = original_config.job.runModel.model
- model_id, model_obj = cls._generate_model_for_entry_point(model_loader, model_detail) # noqa
- job_config = cls._add_job_resource(job_config, model_id, model_obj)
- job_config.job.runModel.model = _util.selector_for(model_id)
-
- if job_config.job.jobType == _meta.JobType.RUN_FLOW:
+ model_id, model_obj = cls._generate_model_for_class(model_loader, model_class)
+ job_detail.model = _util.selector_for(model_id)
+ job_config = cls._add_job_resource(job_config, model_id, model_obj)

- job_config.job.runFlow = copy.copy(job_config.job.runFlow)
- job_config.job.runFlow.models = copy.copy(job_config.job.runFlow.models)
+ # Otherwise look for models specified as a single string, and take that as the entry point
+ else:

- for model_key, model_detail in original_config.job.runFlow.models.items():
+ # Jobs with a single model
+ if hasattr(job_detail, "model") and isinstance(job_detail.model, str):
+ model_id, model_obj = cls._generate_model_for_entry_point(model_loader, job_detail.model) # noqa
+ job_detail.model = _util.selector_for(model_id)
+ job_config = cls._add_job_resource(job_config, model_id, model_obj)

- # Only apply translation if the model is specified as a string instead of a selector
- if isinstance(model_detail, str):
- model_id, model_obj = cls._generate_model_for_entry_point(model_loader, model_detail)
- job_config = cls._add_job_resource(job_config, model_id, model_obj)
- job_config.job.runFlow.models[model_key] = _util.selector_for(model_id)
+ # Jobs with multiple models
+ elif hasattr(job_detail, "models") and isinstance(job_detail.models, dict):
+ for model_key, model_detail in job_detail.models.items():
+ if isinstance(model_detail, str):
+ model_id, model_obj = cls._generate_model_for_entry_point(model_loader, model_detail)
+ job_detail.models[model_key] = _util.selector_for(model_id)
+ job_config = cls._add_job_resource(job_config, model_id, model_obj)

  model_loader.destroy_scope("DEV_MODE_TRANSLATION")

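The addressing convention described in the comments above can be pictured with hypothetical stand-ins for the job detail types (these are not the real metadata classes):

    class RunModelDetail:
        # Single-model jobs expose a field called "model"
        def __init__(self):
            self.model = "acme_models.MyModel"   # entry point as a plain string

    class RunFlowDetail:
        # Multi-model jobs expose a "models" dict keyed by model name
        def __init__(self):
            self.models = {"pnl": "acme_models.PnlModel"}

    def single_model_job(job_detail) -> bool:
        # Mirrors the hasattr-based dispatch used in _process_models
        return hasattr(job_detail, "model")

    print(single_model_job(RunModelDetail()))   # True
    print(single_model_job(RunFlowDetail()))    # False
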
@@ -596,35 +626,34 @@ class DevModeTranslator:
  @classmethod
  def _process_parameters(cls, job_config: _cfg.JobConfig) -> _cfg.JobConfig:

- if job_config.job.jobType == _meta.JobType.RUN_MODEL:
-
- job_details = job_config.job.runModel
- model_key = _util.object_key(job_details.model)
- model_or_flow = job_config.resources[model_key].model
+ # This relies on convention for naming properties across similar job types

- elif job_config.job.jobType == _meta.JobType.RUN_FLOW:
+ job_detail = cls._get_job_detail(job_config)

- job_details = job_config.job.runFlow
- flow_key = _util.object_key(job_details.flow)
+ if hasattr(job_detail, "model"):
+ model_key = _util.object_key(job_detail.model)
+ model_or_flow = job_config.resources[model_key].model
+ elif hasattr(job_detail, "flow"):
+ flow_key = _util.object_key(job_detail.flow)
  model_or_flow = job_config.resources[flow_key].flow
-
  else:
- raise _ex.EUnexpected()
+ model_or_flow = None
+
+ if model_or_flow is not None:

- param_specs = model_or_flow.parameters
- param_values = job_details.parameters
+ param_specs = model_or_flow.parameters
+ raw_values = job_detail.parameters

- # Set encoded params on runModel or runFlow depending on the job type
- job_details.parameters = cls._process_parameters_dict(param_specs, param_values)
+ job_detail.parameters = cls._process_parameters_dict(param_specs, raw_values)

  return job_config

  @classmethod
  def _process_parameters_dict(
  cls, param_specs: tp.Dict[str, _meta.ModelParameter],
- param_values: tp.Dict[str, _meta.Value]) -> tp.Dict[str, _meta.Value]:
+ raw_values: tp.Dict[str, _meta.Value]) -> tp.Dict[str, _meta.Value]:

- unknown_params = list(filter(lambda p: p not in param_specs, param_values))
+ unknown_params = list(filter(lambda p: p not in param_specs, raw_values))

  if any(unknown_params):
  msg = f"Unknown parameters cannot be translated: [{', '.join(unknown_params)}]"
@@ -633,7 +662,7 @@ class DevModeTranslator:

  encoded_values = dict()

- for p_name, p_value in param_values.items():
+ for p_name, p_value in raw_values.items():

  if isinstance(p_value, _meta.Value):
  encoded_values[p_name] = p_value
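The rename from param_values to raw_values reflects the contract visible in these hunks: callers may supply either metadata Value objects, which pass through unchanged, or raw Python values, which get encoded, while names not declared in the parameter specs are rejected. A simplified sketch (the specs and values here are hypothetical):

    def process_parameters_dict(param_specs: dict, raw_values: dict) -> dict:
        # Reject any name the model or flow does not declare
        unknown_params = [p for p in raw_values if p not in param_specs]
        if unknown_params:
            raise ValueError(
                f"Unknown parameters cannot be translated: [{', '.join(unknown_params)}]")
        # The real implementation encodes raw values against the declared types here
        return dict(raw_values)

    process_parameters_dict({"start_date": "DATE"}, {"start_date": "2024-01-01"})  # ok
    # process_parameters_dict({}, {"typo_name": 1})  -> raises
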
@@ -651,23 +680,23 @@ class DevModeTranslator:
  @classmethod
  def _process_inputs_and_outputs(cls, sys_config: _cfg.RuntimeConfig, job_config: _cfg.JobConfig) -> _cfg.JobConfig:

- if job_config.job.jobType == _meta.JobType.RUN_MODEL:
- job_details = job_config.job.runModel
- model_obj = _util.get_job_resource(job_details.model, job_config)
+ job_detail = cls._get_job_detail(job_config)
+
+ if hasattr(job_detail, "model"):
+ model_obj = _util.get_job_resource(job_detail.model, job_config)
  required_inputs = model_obj.model.inputs
  required_outputs = model_obj.model.outputs

- elif job_config.job.jobType == _meta.JobType.RUN_FLOW:
- job_details = job_config.job.runFlow
- flow_obj = _util.get_job_resource(job_details.flow, job_config)
+ elif hasattr(job_detail, "flow"):
+ flow_obj = _util.get_job_resource(job_detail.flow, job_config)
  required_inputs = flow_obj.flow.inputs
  required_outputs = flow_obj.flow.outputs

  else:
  return job_config

- job_inputs = job_details.inputs
- job_outputs = job_details.outputs
+ job_inputs = job_detail.inputs
+ job_outputs = job_detail.outputs
  job_resources = job_config.resources

  for input_key, input_value in job_inputs.items():
tracdap/rt/_exec/engine.py

@@ -12,8 +12,6 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from __future__ import annotations
-
  import copy as cp
  import dataclasses as dc
  import enum
@@ -272,12 +270,13 @@ class JobProcessor(_actors.Actor):
  self.result_spec = result_spec
  self._models = models
  self._storage = storage
+ self._resolver = _func.FunctionResolver(models, storage)
  self._log = _util.logger_for_object(self)

  def on_start(self):
  self._log.info(f"Starting job [{self.job_key}]")
  self._models.create_scope(self.job_key)
- self.actors().spawn(GraphBuilder(self.job_config, self.result_spec, self._models, self._storage))
+ self.actors().spawn(GraphBuilder(self.job_config, self.result_spec, self._resolver))

  def on_stop(self):
  self._log.info(f"Cleaning up job [{self.job_key}]")
@@ -305,7 +304,7 @@ class JobProcessor(_actors.Actor):

  @_actors.Message
  def job_graph(self, graph: _EngineContext, root_id: NodeId):
- self.actors().spawn(GraphProcessor(graph, root_id))
+ self.actors().spawn(GraphProcessor(graph, root_id, self._resolver))
  self.actors().stop(self.actors().sender)

  @_actors.Message
@@ -331,15 +330,14 @@ class GraphBuilder(_actors.Actor):
  def __init__(
  self, job_config: _cfg.JobConfig,
  result_spec: _graph.JobResultSpec,
- models: _models.ModelLoader,
- storage: _storage.StorageManager):
+ resolver: _func.FunctionResolver):

  super().__init__()
  self.job_config = job_config
  self.result_spec = result_spec
  self.graph: tp.Optional[_EngineContext] = None

- self._resolver = _func.FunctionResolver(models, storage)
+ self._resolver = resolver
  self._log = _util.logger_for_object(self)

  def on_start(self):
@@ -378,11 +376,12 @@ class GraphProcessor(_actors.Actor):
  Once all running nodes are stopped, an error is reported to the parent
  """

- def __init__(self, graph: _EngineContext, root_id: NodeId):
+ def __init__(self, graph: _EngineContext, root_id: NodeId, resolver: _func.FunctionResolver):
  super().__init__()
  self.graph = graph
  self.root_id = root_id
  self.processors: tp.Dict[NodeId, _actors.ActorId] = dict()
+ self._resolver = resolver
  self._log = _util.logger_for_object(self)

  def on_start(self):
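Across these hunks, resolver ownership moves up one level: JobProcessor now builds a single FunctionResolver and hands it to both GraphBuilder and GraphProcessor, so nodes added at runtime can be resolved as well. A minimal sketch of the new wiring (constructors simplified, actor machinery omitted):

    class FunctionResolver:
        def __init__(self, models, storage):
            self._models, self._storage = models, storage

    class GraphBuilder:
        def __init__(self, resolver):
            self._resolver = resolver   # previously constructed its own resolver

    class GraphProcessor:
        def __init__(self, resolver):
            self._resolver = resolver   # needed to resolve nodes added by update_graph

    class JobProcessor:
        def __init__(self, models, storage):
            self._resolver = FunctionResolver(models, storage)  # one per job

        def spawn_children(self):
            return GraphBuilder(self._resolver), GraphProcessor(self._resolver)
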
@@ -463,6 +462,62 @@ class GraphProcessor(_actors.Actor):
  # Job may have completed due to error propagation
  self.check_job_status(do_submit=False)

+ @_actors.Message
+ def update_graph(
+ self, requestor_id: NodeId,
+ new_nodes: tp.Dict[NodeId, _graph.Node],
+ new_deps: tp.Dict[NodeId, tp.List[_graph.Dependency]]):
+
+ new_graph = cp.copy(self.graph)
+ new_graph.nodes = cp.copy(new_graph.nodes)
+
+ # Attempting to insert a duplicate node is always an error
+ node_collision = list(filter(lambda nid: nid in self.graph.nodes, new_nodes))
+
+ # Only allow adding deps to pending nodes for now (adding deps to active nodes will require more work)
+ dep_collision = list(filter(lambda nid: nid not in self.graph.pending_nodes, new_deps))
+
+ dep_invalid = list(filter(
+ lambda dds: any(filter(lambda dd: dd.node_id not in new_nodes, dds)),
+ new_deps.values()))
+
+ if any(node_collision) or any(dep_collision) or any(dep_invalid):
+
+ self._log.error(f"Node collision during graph update (requested by {requestor_id})")
+ self._log.error(f"Duplicate node IDs: {node_collision or 'None'}")
+ self._log.error(f"Dependency updates for dead nodes: {dep_collision or 'None'}")
+ self._log.error(f"Dependencies added for existing nodes: {dep_invalid or 'None'}")
+
+ # Set an error on the node, and wait for it to complete normally
+ # The error will be picked up when the result is recorded
+ # If dependencies are added for an active node, more signalling will be needed
+ requestor = cp.copy(new_graph.nodes[requestor_id])
+ requestor.error = _ex.ETracInternal("Node collision during graph update")
+ new_graph.nodes[requestor_id] = requestor
+
+ return
+
+ new_graph.pending_nodes = cp.copy(new_graph.pending_nodes)
+
+ for node_id, node in new_nodes.items():
+ GraphLogger.log_node_add(node)
+ node_func = self._resolver.resolve_node(node)
+ new_node = _EngineNode(node, {}, function=node_func)
+ new_graph.nodes[node_id] = new_node
+ new_graph.pending_nodes.add(node_id)
+
+ for node_id, deps in new_deps.items():
+ engine_node = cp.copy(new_graph.nodes[node_id])
+ engine_node.dependencies = cp.copy(engine_node.dependencies)
+ for dep in deps:
+ GraphLogger.log_dependency_add(node_id, dep.node_id)
+ engine_node.dependencies[dep.node_id] = dep.dependency_type
+ new_graph.nodes[node_id] = engine_node
+
+ self.graph = new_graph
+
+ self.actors().send(self.actors().id, "submit_viable_nodes")
+
  @classmethod
  def _is_required_node(cls, node: _EngineNode, graph: _EngineContext):

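The three checks in update_graph can be restated compactly with plain containers (types simplified from NodeId / Node / Dependency):

    import typing as tp

    def validate_graph_update(
            existing_nodes: tp.Set[str], pending_nodes: tp.Set[str],
            new_nodes: tp.Set[str], new_deps: tp.Dict[str, tp.List[str]]) -> bool:

        # A new node may not reuse an existing node ID
        node_collision = [nid for nid in new_nodes if nid in existing_nodes]

        # Dependencies may only be added to nodes that are still pending
        dep_collision = [nid for nid in new_deps if nid not in pending_nodes]

        # New dependencies may only point at the newly supplied nodes
        dep_invalid = [
            deps for deps in new_deps.values()
            if any(dep not in new_nodes for dep in deps)]

        return not (node_collision or dep_collision or dep_invalid)
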
@@ -654,8 +709,15 @@ class NodeProcessor(_actors.Actor):

  NodeLogger.log_node_start(self.node)

+ # Context contains only node states available when the context is set up
  ctx = NodeContextImpl(self.graph.nodes)
- result = self.node.function(ctx)
+
+ # Callback remains valid because it only lives inside the call stack for this message
+ callback = NodeCallbackImpl(self.actors(), self.node_id)
+
+ # Execute the node function
+ result = self.node.function(ctx, callback)
+
  self._check_result_type(result)

  NodeLogger.log_node_succeeded(self.node)
@@ -730,6 +792,37 @@ class DataNodeProcessor(NodeProcessor):
  super().__init__(graph, node_id, node)


+ class GraphLogger:
+
+ """
+ Log the activity of the GraphProcessor
+ """
+
+ _log = _util.logger_for_class(GraphProcessor)
+
+ @classmethod
+ def log_node_add(cls, node: _graph.Node):
+
+ node_name = node.id.name
+ namespace = node.id.namespace
+
+ cls._log.info(f"ADD {cls._func_type(node)} [{node_name}] / {namespace}")
+
+ @classmethod
+ def log_dependency_add(cls, node_id: NodeId, dep_id: NodeId):
+
+ if node_id.namespace == dep_id.namespace:
+ cls._log.info(f"ADD DEPENDENCY [{node_id.name}] -> [{dep_id.name}] / {node_id.namespace}")
+ else:
+ cls._log.info(f"ADD DEPENDENCY [{node_id.name}] / {node_id.namespace} -> [{dep_id.name}] / {dep_id.namespace}")
+
+ @classmethod
+ def _func_type(cls, node: _graph.Node):
+
+ func_type = type(node)
+ return func_type.__name__[:-4]
+
+
  class NodeLogger:

  """
@@ -912,3 +1005,21 @@ class NodeContextImpl(_func.NodeContext):
  for node_id, node in self.__nodes.items():
  if node.complete and not node.error:
  yield node_id, node.result
+
+
+ class NodeCallbackImpl(_func.NodeCallback):
+
+ """
+ Callback impl is passed to node functions so they can call into the engine
+ It is only valid as long as the node function runs inside the call stack of a single message
+ """
+
+ def __init__(self, actor_ctx: _actors.ActorContext, node_id: NodeId):
+ self.__actor_ctx = actor_ctx
+ self.__node_id = node_id
+
+ def send_graph_updates(
+ self, new_nodes: tp.Dict[NodeId, _graph.Node],
+ new_deps: tp.Dict[NodeId, tp.List[_graph.Dependency]]):
+
+ self.__actor_ctx.send_parent("update_graph", self.__node_id, new_nodes, new_deps)
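
Taken together with the NodeProcessor change above, node functions now receive the node context plus a callback and can request graph updates mid-job. A hypothetical node function (the real signature lives in tracdap.rt._exec.functions; node construction is elided):

    def example_node_function(ctx, callback):

        # Normal node work reads upstream results from ctx (lookups elided)

        # To schedule extra work, supply new nodes plus the dependencies that
        # gate them; GraphProcessor.update_graph validates the request before
        # anything new is submitted
        new_nodes = {}   # NodeId -> _graph.Node, built from job metadata
        new_deps = {}    # NodeId -> [_graph.Dependency]
        callback.send_graph_updates(new_nodes, new_deps)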