digitalkin 0.3.2.dev2__py3-none-any.whl → 0.3.2.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- digitalkin/__version__.py +1 -1
- digitalkin/core/job_manager/single_job_manager.py +14 -8
- digitalkin/core/task_manager/task_session.py +60 -98
- digitalkin/grpc_servers/module_servicer.py +31 -7
- digitalkin/models/core/task_monitor.py +4 -0
- digitalkin/models/module/__init__.py +10 -2
- digitalkin/models/module/base_types.py +61 -0
- digitalkin/models/module/module_context.py +19 -1
- digitalkin/models/module/module_types.py +28 -392
- digitalkin/models/module/setup_types.py +463 -0
- digitalkin/models/module/tool_reference.py +105 -0
- digitalkin/models/module/utility.py +22 -1
- digitalkin/modules/_base_module.py +41 -3
- digitalkin/modules/triggers/__init__.py +0 -4
- digitalkin/services/services_config.py +4 -0
- digitalkin/services/user_profile/user_profile_strategy.py +0 -15
- {digitalkin-0.3.2.dev2.dist-info → digitalkin-0.3.2.dev3.dist-info}/METADATA +1 -1
- {digitalkin-0.3.2.dev2.dist-info → digitalkin-0.3.2.dev3.dist-info}/RECORD +21 -18
- {digitalkin-0.3.2.dev2.dist-info → digitalkin-0.3.2.dev3.dist-info}/WHEEL +0 -0
- {digitalkin-0.3.2.dev2.dist-info → digitalkin-0.3.2.dev3.dist-info}/licenses/LICENSE +0 -0
- {digitalkin-0.3.2.dev2.dist-info → digitalkin-0.3.2.dev3.dist-info}/top_level.txt +0 -0
digitalkin/models/module/setup_types.py
@@ -0,0 +1,463 @@
+"""Setup model types with dynamic schema resolution."""
+
+from __future__ import annotations
+
+import copy
+import types
+import typing
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, get_args, get_origin
+
+from pydantic import BaseModel, ConfigDict, create_model
+
+from digitalkin.logger import logger
+from digitalkin.models.module.tool_reference import ToolReference
+from digitalkin.utils.dynamic_schema import (
+    DynamicField,
+    get_fetchers,
+    has_dynamic,
+    resolve_safe,
+)
+
+if TYPE_CHECKING:
+    from pydantic.fields import FieldInfo
+
+    from digitalkin.services.registry import RegistryStrategy
+
+SetupModelT = TypeVar("SetupModelT", bound="SetupModel")
+
+
+class SetupModel(BaseModel, Generic[SetupModelT]):
+    """Base definition of setup model showing mandatory root fields.
+
+    Optionally, the setup model can define a config option in json_schema_extra
+    to be used to initialize the Kin. Supports dynamic schema providers for
+    runtime value generation.
+
+    Attributes:
+        model_fields: Inherited from Pydantic BaseModel, contains field definitions.
+
+    See Also:
+        - Documentation: docs/api/dynamic_schema.md
+        - Tests: tests/modules/test_setup_model.py
+    """
+
+    @classmethod
+    async def get_clean_model(
+        cls,
+        *,
+        config_fields: bool,
+        hidden_fields: bool,
+        force: bool = False,
+    ) -> type[SetupModelT]:
+        """Dynamically builds and returns a new BaseModel subclass with filtered fields.
+
+        This method filters fields based on their `json_schema_extra` metadata:
+        - Fields with `{"config": True}` are included only when `config_fields=True`
+        - Fields with `{"hidden": True}` are included only when `hidden_fields=True`
+
+        When `force=True`, fields with dynamic schema providers will have their
+        providers called to fetch fresh values for schema metadata like enums.
+        This includes recursively processing nested BaseModel fields.
+
+        Args:
+            config_fields: If True, include fields marked with `{"config": True}`.
+                These are typically initial configuration fields.
+            hidden_fields: If True, include fields marked with `{"hidden": True}`.
+                These are typically runtime-only fields not shown in initial config.
+            force: If True, refresh dynamic schema fields by calling their providers.
+                Use this when you need up-to-date values from external sources like
+                databases or APIs. Default is False for performance.
+
+        Returns:
+            A new BaseModel subclass with filtered fields.
+        """
+        clean_fields: dict[str, Any] = {}
+
+        for name, field_info in cls.model_fields.items():
+            extra = field_info.json_schema_extra or {}
+            is_config = bool(extra.get("config", False)) if isinstance(extra, dict) else False
+            is_hidden = bool(extra.get("hidden", False)) if isinstance(extra, dict) else False
+
+            # Skip config unless explicitly included
+            if is_config and not config_fields:
+                logger.debug("Skipping '%s' (config-only)", name)
+                continue
+
+            # Skip hidden unless explicitly included
+            if is_hidden and not hidden_fields:
+                logger.debug("Skipping '%s' (hidden-only)", name)
+                continue
+
+            # Refresh dynamic schema fields when force=True
+            current_field_info = field_info
+            current_annotation = field_info.annotation
+
+            if force:
+                # Check if this field has DynamicField metadata
+                if has_dynamic(field_info):
+                    current_field_info = await cls._refresh_field_schema(name, field_info)
+
+                # Check if the annotation is a nested BaseModel that might have dynamic fields
+                nested_model = cls._get_base_model_type(current_annotation)
+                if nested_model is not None:
+                    refreshed_nested = await cls._refresh_nested_model(nested_model)
+                    if refreshed_nested is not nested_model:
+                        # Update annotation to use refreshed nested model
+                        current_annotation = refreshed_nested
+                        # Create new field_info with updated annotation (deep copy for safety)
+                        current_field_info = copy.deepcopy(current_field_info)
+                        current_field_info.annotation = current_annotation
+
+            clean_fields[name] = (current_annotation, current_field_info)
+
+        # Dynamically create a model e.g. "SetupModel"
+        m = create_model(
+            f"{cls.__name__}",
+            __base__=BaseModel,
+            __config__=ConfigDict(arbitrary_types_allowed=True),
+            **clean_fields,
+        )
+        return cast("type[SetupModelT]", m)
+
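As a usage sketch (not part of the diff), the filtering rules above can be exercised like this; it assumes digitalkin 0.3.2.dev3 is installed, and the field names `prompt`, `api_key`, and `runtime_state` are invented for illustration:

```python
# Illustrative sketch: field filtering in get_clean_model (hypothetical field names).
import asyncio

from pydantic import Field

from digitalkin.models.module.setup_types import SetupModel


class MySetup(SetupModel):
    # Always exposed.
    prompt: str = Field(default="hello")
    # Only included when config_fields=True.
    api_key: str = Field(default="", json_schema_extra={"config": True})
    # Only included when hidden_fields=True.
    runtime_state: str = Field(default="", json_schema_extra={"hidden": True})


async def main() -> None:
    public = await MySetup.get_clean_model(config_fields=False, hidden_fields=False)
    config = await MySetup.get_clean_model(config_fields=True, hidden_fields=False)
    print(sorted(public.model_fields))  # ['prompt']
    print(sorted(config.model_fields))  # ['api_key', 'prompt']


asyncio.run(main())
```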
+    @classmethod
+    def _get_base_model_type(cls, annotation: type | None) -> type[BaseModel] | None:
+        """Extract BaseModel type from an annotation.
+
+        Handles direct types, Optional, Union, list, dict, set, tuple, and other generics.
+
+        Args:
+            annotation: The type annotation to inspect.
+
+        Returns:
+            The BaseModel subclass if found, None otherwise.
+        """
+        if annotation is None:
+            return None
+
+        # Direct BaseModel subclass check
+        if isinstance(annotation, type) and issubclass(annotation, BaseModel):
+            return annotation
+
+        origin = get_origin(annotation)
+        if origin is None:
+            return None
+
+        args = get_args(annotation)
+        return cls._extract_base_model_from_args(origin, args)
+
+    @classmethod
+    def _extract_base_model_from_args(
+        cls,
+        origin: type,
+        args: tuple[type, ...],
+    ) -> type[BaseModel] | None:
+        """Extract BaseModel from generic type arguments.
+
+        Args:
+            origin: The generic origin type (list, dict, Union, etc.).
+            args: The type arguments.
+
+        Returns:
+            The BaseModel subclass if found, None otherwise.
+        """
+        # Union/Optional: check each arg (supports both typing.Union and types.UnionType)
+        # Python 3.10+ uses types.UnionType for X | Y syntax
+        if origin is typing.Union or origin is types.UnionType:
+            return cls._find_base_model_in_args(args)
+
+        # list, set, frozenset: check first arg
+        if origin in {list, set, frozenset} and args:
+            return cls._check_base_model(args[0])
+
+        # dict: check value type (second arg)
+        dict_value_index = 1
+        if origin is dict and len(args) > dict_value_index:
+            return cls._check_base_model(args[dict_value_index])
+
+        # tuple: check first non-ellipsis arg
+        if origin is tuple:
+            return cls._find_base_model_in_args(args, skip_ellipsis=True)
+
+        return None
+
+    @classmethod
+    def _check_base_model(cls, arg: type) -> type[BaseModel] | None:
+        """Check if arg is a BaseModel subclass.
+
+        Returns:
+            The BaseModel subclass if arg is one, None otherwise.
+        """
+        if isinstance(arg, type) and issubclass(arg, BaseModel):
+            return arg
+        return None
+
+    @classmethod
+    def _find_base_model_in_args(
+        cls,
+        args: tuple[type, ...],
+        *,
+        skip_ellipsis: bool = False,
+    ) -> type[BaseModel] | None:
+        """Find first BaseModel in args.
+
+        Returns:
+            The first BaseModel subclass found, None otherwise.
+        """
+        for arg in args:
+            if arg is type(None):
+                continue
+            if skip_ellipsis and arg is ...:
+                continue
+            result = cls._check_base_model(arg)
+            if result is not None:
+                return result
+        return None
+
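A small illustration of the annotation introspection above (again not part of the diff); `Inner` is an invented model, and the private `_get_base_model_type` helper is called directly only to document its behaviour:

```python
# Illustrative sketch of the annotation introspection helpers (Python 3.10+ for X | Y).
from pydantic import BaseModel

from digitalkin.models.module.setup_types import SetupModel


class Inner(BaseModel):
    value: int = 0


print(SetupModel._get_base_model_type(Inner))              # Inner (direct type)
print(SetupModel._get_base_model_type(Inner | None))       # Inner (Optional/Union)
print(SetupModel._get_base_model_type(list[Inner]))        # Inner (first list arg)
print(SetupModel._get_base_model_type(dict[str, Inner]))   # Inner (dict value type)
print(SetupModel._get_base_model_type(tuple[Inner, ...]))  # Inner (skips ellipsis)
print(SetupModel._get_base_model_type(int))                # None
```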
+    @classmethod
+    async def _refresh_nested_model(cls, model_cls: type[BaseModel]) -> type[BaseModel]:
+        """Refresh dynamic fields in a nested BaseModel.
+
+        Creates a new model class with all DynamicField metadata resolved.
+
+        Args:
+            model_cls: The nested model class to refresh.
+
+        Returns:
+            A new model class with refreshed fields, or the original if no changes.
+        """
+        has_changes = False
+        clean_fields: dict[str, Any] = {}
+
+        for name, field_info in model_cls.model_fields.items():
+            current_field_info = field_info
+            current_annotation = field_info.annotation
+
+            # Check if field has DynamicField metadata
+            if has_dynamic(field_info):
+                current_field_info = await cls._refresh_field_schema(name, field_info)
+                has_changes = True
+
+            # Recursively check nested models
+            nested_model = cls._get_base_model_type(current_annotation)
+            if nested_model is not None:
+                refreshed_nested = await cls._refresh_nested_model(nested_model)
+                if refreshed_nested is not nested_model:
+                    current_annotation = refreshed_nested
+                    current_field_info = copy.deepcopy(current_field_info)
+                    current_field_info.annotation = current_annotation
+                    has_changes = True
+
+            clean_fields[name] = (current_annotation, current_field_info)
+
+        if not has_changes:
+            return model_cls
+
+        # Create new model with refreshed fields
+        logger.debug("Creating refreshed nested model for '%s'", model_cls.__name__)
+        return create_model(
+            model_cls.__name__,
+            __base__=BaseModel,
+            __config__=ConfigDict(arbitrary_types_allowed=True),
+            **clean_fields,
+        )
+
+    @classmethod
+    async def _refresh_field_schema(cls, field_name: str, field_info: FieldInfo) -> FieldInfo:
+        """Refresh a field's json_schema_extra with fresh values from dynamic providers.
+
+        This method calls all dynamic providers registered for a field (via Annotated
+        metadata) and creates a new FieldInfo with the resolved values. The original
+        field_info is not modified.
+
+        Uses `resolve_safe()` for structured error handling, allowing partial success
+        when some fetchers fail. Successfully resolved values are still applied.
+
+        Args:
+            field_name: The name of the field being refreshed (used for logging).
+            field_info: The original FieldInfo object containing the dynamic providers.
+
+        Returns:
+            A new FieldInfo object with the same attributes as the original, but with
+            `json_schema_extra` containing resolved values and Dynamic metadata removed.
+
+        Note:
+            If all fetchers fail, the original field_info is returned unchanged.
+            If some fetchers fail, successfully resolved values are still applied.
+        """
+        fetchers = get_fetchers(field_info)
+
+        if not fetchers:
+            return field_info
+
+        fetcher_keys = list(fetchers.keys())
+        logger.debug(
+            "Refreshing dynamic schema for field '%s' with fetchers: %s",
+            field_name,
+            fetcher_keys,
+            extra={"field_name": field_name, "fetcher_keys": fetcher_keys},
+        )
+
+        # Resolve all fetchers with structured error handling
+        result = await resolve_safe(fetchers)
+
+        # Log any errors that occurred with full details
+        if result.errors:
+            for key, error in result.errors.items():
+                logger.warning(
+                    "Failed to resolve '%s' for field '%s': %s: %s",
+                    key,
+                    field_name,
+                    type(error).__name__,
+                    str(error) or "(no message)",
+                    extra={
+                        "field_name": field_name,
+                        "fetcher_key": key,
+                        "error_type": type(error).__name__,
+                        "error_message": str(error),
+                        "error_repr": repr(error),
+                    },
+                )
+
+        # If no values were resolved, return original field_info
+        if not result.values:
+            logger.warning(
+                "All fetchers failed for field '%s', keeping original",
+                field_name,
+            )
+            return field_info
+
+        # Build new json_schema_extra with resolved values merged
+        extra = field_info.json_schema_extra or {}
+        new_extra = {**extra, **result.values} if isinstance(extra, dict) else result.values
+
+        # Create a deep copy of the FieldInfo to avoid shared mutable state
+        new_field_info = copy.deepcopy(field_info)
+        new_field_info.json_schema_extra = new_extra
+
+        # Remove Dynamic from metadata (it's been resolved)
+        new_metadata = [m for m in new_field_info.metadata if not isinstance(m, DynamicField)]
+        new_field_info.metadata = new_metadata
+
+        logger.debug(
+            "Refreshed '%s' with dynamic values: %s",
+            field_name,
+            list(result.values.keys()),
+        )
+
+        return new_field_info
+
+    def resolve_tool_references(self, registry: RegistryStrategy) -> None:
+        """Resolve all ToolReference fields in this setup instance.
+
+        Recursively walks through all fields, including nested BaseModel instances,
+        and resolves any ToolReference fields using the provided registry.
+
+        Args:
+            registry: Registry service to use for resolution.
+        """
+        self._resolve_tool_references_recursive(self, registry)
+
+    @classmethod
+    def _resolve_tool_references_recursive(
+        cls,
+        model_instance: BaseModel,
+        registry: RegistryStrategy,
+    ) -> None:
+        """Recursively resolve ToolReference fields in a model instance.
+
+        Args:
+            model_instance: The model instance to process.
+            registry: Registry service to use for resolution.
+        """
+        for field_name, field_value in model_instance.__dict__.items():
+            if field_value is None:
+                continue
+
+            cls._resolve_field_value(field_name, field_value, registry)
+
+    @classmethod
+    def _resolve_field_value(
+        cls,
+        field_name: str,
+        field_value: BaseModel | ToolReference | list | dict,
+        registry: RegistryStrategy,
+    ) -> None:
+        """Resolve a single field value, handling different types.
+
+        Args:
+            field_name: Name of the field for logging.
+            field_value: The value to process.
+            registry: Registry service to use for resolution.
+        """
+        if isinstance(field_value, ToolReference):
+            cls._resolve_single_tool_reference(field_name, field_value, registry)
+        elif isinstance(field_value, BaseModel):
+            cls._resolve_tool_references_recursive(field_value, registry)
+        elif isinstance(field_value, list):
+            cls._resolve_list_items(field_value, registry)
+        elif isinstance(field_value, dict):
+            cls._resolve_dict_values(field_value, registry)
+
+    @classmethod
+    def _resolve_single_tool_reference(
+        cls,
+        field_name: str,
+        tool_ref: ToolReference,
+        registry: RegistryStrategy,
+    ) -> None:
+        """Resolve a single ToolReference instance.
+
+        Args:
+            field_name: Name of the field for logging.
+            tool_ref: The ToolReference instance.
+            registry: Registry service to use for resolution.
+        """
+        try:
+            tool_ref.resolve(registry)
+            logger.debug(
+                "Resolved ToolReference field '%s'",
+                field_name,
+                extra={"field_name": field_name, "mode": tool_ref.config.mode.value},
+            )
+        except Exception:
+            logger.exception(
+                "Failed to resolve ToolReference field '%s'",
+                field_name,
+                extra={"field_name": field_name, "config": tool_ref.config.model_dump()},
+            )
+
+    @classmethod
+    def _resolve_list_items(
+        cls,
+        items: list,
+        registry: RegistryStrategy,
+    ) -> None:
+        """Resolve ToolReference instances in a list.
+
+        Args:
+            items: List of items to process.
+            registry: Registry service to use for resolution.
+        """
+        for item in items:
+            if isinstance(item, ToolReference):
+                cls._resolve_single_tool_reference("list_item", item, registry)
+            elif isinstance(item, BaseModel):
+                cls._resolve_tool_references_recursive(item, registry)
+
+    @classmethod
+    def _resolve_dict_values(
+        cls,
+        mapping: dict,
+        registry: RegistryStrategy,
+    ) -> None:
+        """Resolve ToolReference instances in a dict's values.
+
+        Args:
+            mapping: Dict to process.
+            registry: Registry service to use for resolution.
+        """
+        for item in mapping.values():
+            if isinstance(item, ToolReference):
+                cls._resolve_single_tool_reference("dict_value", item, registry)
+            elif isinstance(item, BaseModel):
+                cls._resolve_tool_references_recursive(item, registry)
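To illustrate the recursive walk, a hedged sketch follows; `FakeRegistry`, `ToolsBlock`, and the `summarizer-tool` id are invented stand-ins for the real `RegistryStrategy` and `ModuleInfo`:

```python
# Illustrative sketch: resolve_tool_references walking nested fields.
from pydantic import BaseModel

from digitalkin.models.module.setup_types import SetupModel
from digitalkin.models.module.tool_reference import (
    ToolReference,
    ToolReferenceConfig,
    ToolSelectionMode,
)


class FakeRegistry:
    """Duck-typed stand-in for RegistryStrategy (only what resolve() needs)."""

    def discover_by_id(self, module_id: str) -> object:
        return {"module_id": module_id}  # stand-in for ModuleInfo

    def search(self, **_: object) -> list:
        return []


class ToolsBlock(BaseModel):
    summarizer: ToolReference


class MySetup(SetupModel):
    tools: ToolsBlock
    extra_tools: list[ToolReference] = []  # lists and dicts are walked too


setup = MySetup(
    tools=ToolsBlock(
        summarizer=ToolReference(
            config=ToolReferenceConfig(mode=ToolSelectionMode.FIXED, fixed_id="summarizer-tool")
        )
    )
)
setup.resolve_tool_references(FakeRegistry())  # recurses into nested models
print(setup.tools.summarizer.selected_module_id)  # summarizer-tool
```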
digitalkin/models/module/tool_reference.py
@@ -0,0 +1,105 @@
+"""Tool reference types for archetype module configuration."""
+
+from enum import Enum
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel, Field, PrivateAttr, model_validator
+
+from digitalkin.models.services.registry import ModuleInfo
+
+if TYPE_CHECKING:
+    from digitalkin.services.registry import RegistryStrategy
+
+
+class ToolSelectionMode(str, Enum):
+    """Mode for tool selection in archetype setup."""
+
+    FIXED = "fixed"
+    TAG = "tag"
+    DISCOVERABLE = "discoverable"
+
+
+class ToolReferenceConfig(BaseModel):
+    """Configuration for how a tool should be selected."""
+
+    mode: ToolSelectionMode = Field(default=ToolSelectionMode.FIXED)
+    fixed_id: str | None = Field(default=None, description="Module ID for FIXED mode")
+    tag: str | None = Field(default=None, description="Search tag for TAG mode")
+    organization_id: str | None = Field(default=None, description="Filter by organization")
+
+    @model_validator(mode="after")
+    def validate_config(self) -> "ToolReferenceConfig":
+        """Validate configuration based on mode.
+
+        Returns:
+            Self if validation passes.
+
+        Raises:
+            ValueError: If required field is missing for the mode.
+        """
+        if self.mode == ToolSelectionMode.FIXED and not self.fixed_id:
+            msg = "fixed_id required when mode is FIXED"
+            raise ValueError(msg)
+        if self.mode == ToolSelectionMode.TAG and not self.tag:
+            msg = "tag required when mode is TAG"
+            raise ValueError(msg)
+        return self
+
+
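A quick sketch of the validation rules (the id and tag values are placeholders):

```python
# Illustrative sketch of ToolReferenceConfig mode validation.
from digitalkin.models.module.tool_reference import ToolReferenceConfig, ToolSelectionMode

# FIXED requires fixed_id:
ToolReferenceConfig(mode=ToolSelectionMode.FIXED, fixed_id="my-tool-id")  # ok
# ToolReferenceConfig(mode=ToolSelectionMode.FIXED)  # raises pydantic ValidationError

# TAG requires tag:
ToolReferenceConfig(mode=ToolSelectionMode.TAG, tag="summarization")  # ok

# DISCOVERABLE needs neither; selection is deferred to runtime.
ToolReferenceConfig(mode=ToolSelectionMode.DISCOVERABLE)  # ok
```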
+class ToolReference(BaseModel):
+    """Reference to a tool module for archetype configuration.
+
+    Frontend sets config, backend resolves to actual ModuleInfo.
+    The resolved module_id is persisted in selected_module_id for
+    subsequent StartModule calls without re-resolution.
+    """
+
+    config: ToolReferenceConfig
+    selected_module_id: str | None = Field(default=None, description="Resolved module ID after resolution")
+    _cached_info: ModuleInfo | None = PrivateAttr(default=None)
+
+    @property
+    def module_info(self) -> ModuleInfo | None:
+        """Get cached ModuleInfo if resolved."""
+        return self._cached_info
+
+    @property
+    def is_resolved(self) -> bool:
+        """Check if this reference has been resolved."""
+        return self._cached_info is not None or self.selected_module_id is not None
+
+    def resolve(self, registry: "RegistryStrategy") -> ModuleInfo | None:
+        """Resolve this reference using the provided registry.
+
+        For FIXED mode, looks up by fixed_id.
+        For TAG mode, searches by tag and takes first result.
+        For DISCOVERABLE mode, returns None (LLM handles at runtime).
+
+        Args:
+            registry: Registry service to use for resolution.
+
+        Returns:
+            ModuleInfo if resolved, None if not resolvable or DISCOVERABLE mode.
+        """
+        if self.config.mode == ToolSelectionMode.DISCOVERABLE:
+            return None
+
+        if self.config.mode == ToolSelectionMode.FIXED and self.config.fixed_id:
+            info = registry.discover_by_id(self.config.fixed_id)
+            if info:
+                self._cached_info = info
+                self.selected_module_id = self.config.fixed_id
+                return info
+
+        if self.config.mode == ToolSelectionMode.TAG and self.config.tag:
+            results = registry.search(
+                name=self.config.tag,
+                module_type="tool",
+                organization_id=self.config.organization_id,
+            )
+            if results:
+                self._cached_info = results[0]
+                self.selected_module_id = results[0].module_id
+                return results[0]
+
+        return None
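One property worth noting: `selected_module_id` round-trips through serialization while the cached `ModuleInfo` does not, since `PrivateAttr` fields are excluded from `model_dump()`. A sketch with a placeholder id:

```python
# Illustrative sketch: persistence of a resolved reference (id is made up).
from digitalkin.models.module.tool_reference import (
    ToolReference,
    ToolReferenceConfig,
    ToolSelectionMode,
)

ref = ToolReference(
    config=ToolReferenceConfig(mode=ToolSelectionMode.FIXED, fixed_id="my-tool-id"),
    selected_module_id="my-tool-id",  # e.g. persisted from an earlier resolve()
)
print(ref.is_resolved)  # True (selected_module_id is set)

data = ref.model_dump()  # {'config': {...}, 'selected_module_id': 'my-tool-id'}
restored = ToolReference.model_validate(data)
print(restored.module_info)  # None: the cached ModuleInfo is not persisted
```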
digitalkin/models/module/utility.py
@@ -4,11 +4,12 @@ These protocols are automatically available to all modules and don't need to be
 explicitly included in module output unions.
 """

+from datetime import datetime, timezone
 from typing import Any, ClassVar, Literal

 from pydantic import BaseModel, Field

-from digitalkin.models.module.
+from digitalkin.models.module.base_types import DataTrigger


 class UtilityProtocol(DataTrigger):
@@ -27,6 +28,26 @@ class EndOfStreamOutput(UtilityProtocol):
     protocol: Literal["end_of_stream"] = "end_of_stream"  # type: ignore


+class ModuleStartInfoOutput(UtilityProtocol):
+    """Output sent when module starts with execution context.
+
+    This protocol is sent as the first message when a module starts,
+    providing the client with essential execution context information.
+    """
+
+    protocol: Literal["module_start_info"] = "module_start_info"  # type: ignore
+    job_id: str = Field(..., description="Unique job identifier")
+    mission_id: str = Field(..., description="Mission identifier")
+    setup_id: str = Field(..., description="Setup identifier")
+    setup_version_id: str = Field(..., description="Setup version identifier")
+    module_id: str = Field(..., description="Module identifier")
+    module_name: str = Field(..., description="Human-readable module name")
+    started_at: str = Field(
+        default_factory=lambda: datetime.now(tz=timezone.utc).isoformat(),
+        description="ISO timestamp when module started",
+    )
+
+
 class HealthcheckPingInput(UtilityProtocol):
     """Input for healthcheck ping request."""

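A minimal sketch of the new protocol, assuming `DataTrigger` adds no further required fields; all identifier values are placeholders:

```python
# Illustrative sketch: constructing the first-message protocol (placeholder ids).
from digitalkin.models.module.utility import ModuleStartInfoOutput

info = ModuleStartInfoOutput(
    job_id="job-123",
    mission_id="mission-456",
    setup_id="setup-789",
    setup_version_id="setup-v1",
    module_id="module-abc",
    module_name="My Module",
)
print(info.protocol)    # module_start_info
print(info.started_at)  # ISO-8601 UTC timestamp filled in by default_factory
```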
digitalkin/modules/_base_module.py
@@ -18,7 +18,9 @@ from digitalkin.models.module.module_types import (
     SecretModelT,
     SetupModelT,
 )
-from digitalkin.models.module.
+from digitalkin.models.module.tool_reference import ToolReference
+from digitalkin.models.module.utility import EndOfStreamOutput, ModuleStartInfoOutput
+from digitalkin.models.services.registry import ModuleInfo
 from digitalkin.models.services.storage import BaseRole
 from digitalkin.modules.trigger_handler import TriggerHandler
 from digitalkin.services.services_config import ServicesConfig, ServicesStrategy
@@ -76,6 +78,7 @@ class BaseModule(  # noqa: PLR0904
             registry: RegistryStrategy
             snapshot: SnapshotStrategy
             storage: StorageStrategy
+            user_profile: UserProfileStrategy
         """
         logger.debug("Service initialisation: %s", self.services_config_strategies.keys())
         return {
@@ -436,6 +439,22 @@ class BaseModule(  # noqa: PLR0904
         else:
             self._status = ModuleStatus.STOPPING

+    @staticmethod
+    def _extract_tool_cache(setup_data: SetupModelT) -> dict[str, ModuleInfo]:
+        """Extract resolved ToolReference info from setup data.
+
+        Args:
+            setup_data: The setup model containing potential ToolReference fields.
+
+        Returns:
+            Dict mapping field names to resolved ModuleInfo.
+        """
+        cache: dict[str, ModuleInfo] = {}
+        for name, value in setup_data.__dict__.items():
+            if isinstance(value, ToolReference) and value.module_info:
+                cache[name] = value.module_info
+        return cache
+
     async def start(
         self,
         input_data: InputModelT,
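Since `_extract_tool_cache` is a static helper, its behaviour can be sketched without instantiating a module; `FakeRegistry` and the ids are invented, mirroring the earlier sketch, and only references that resolved to a `ModuleInfo` land in the cache:

```python
# Illustrative sketch: what _extract_tool_cache collects after setup resolution.
from digitalkin.models.module.setup_types import SetupModel
from digitalkin.models.module.tool_reference import (
    ToolReference,
    ToolReferenceConfig,
    ToolSelectionMode,
)
from digitalkin.modules._base_module import BaseModule


class FakeRegistry:
    def discover_by_id(self, module_id: str) -> object:
        return {"module_id": module_id}  # stand-in for ModuleInfo


class MySetup(SetupModel):
    summarizer: ToolReference
    scratch_tool: ToolReference


setup = MySetup(
    summarizer=ToolReference(
        config=ToolReferenceConfig(mode=ToolSelectionMode.FIXED, fixed_id="summarizer-tool")
    ),
    scratch_tool=ToolReference(
        config=ToolReferenceConfig(mode=ToolSelectionMode.DISCOVERABLE)  # stays unresolved
    ),
)
setup.resolve_tool_references(FakeRegistry())
print(BaseModule._extract_tool_cache(setup))  # {'summarizer': {'module_id': 'summarizer-tool'}}
```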
@@ -446,7 +465,26 @@ class BaseModule(  # noqa: PLR0904
         """Start the module."""
         try:
             self.context.callbacks.send_message = callback
-
+
+            # Populate tool cache from resolved ToolReference fields
+            self.context.tool_cache = self._extract_tool_cache(setup_data)
+
+            # Send module start info as first message
+            await callback(
+                DataModel(
+                    root=ModuleStartInfoOutput(
+                        job_id=self.context.session.job_id,
+                        mission_id=self.context.session.mission_id,
+                        setup_id=self.context.session.setup_id,
+                        setup_version_id=self.context.session.setup_version_id,
+                        module_id=self.get_module_id(),
+                        module_name=self.name,
+                    ),
+                    annotations={"role": BaseRole.SYSTEM},
+                )
+            )
+
+            logger.info("Initialize module %s", self.context.session.job_id)
             await self.initialize(self.context, setup_data)
         except Exception as e:
             self._status = ModuleStatus.FAILED
@@ -467,7 +505,7 @@ class BaseModule(  # noqa: PLR0904
         try:
             logger.debug("Init the discovered input handlers.")
             self.triggers_discoverer.init_handlers(self.context)
-            logger.debug(
+            logger.debug("Run lifecycle %s", self.context.session.job_id)
             await self._run_lifecycle(input_data, setup_data)
         except Exception:
             self._status = ModuleStatus.FAILED
digitalkin/modules/triggers/__init__.py
@@ -6,7 +6,3 @@ They provide standard functionality available to all modules.
 Note: These are internal triggers. External code should not import them directly.
 Use UtilityRegistry.get_builtin_triggers() to access the trigger classes.
 """
-
-# No public exports - all triggers are internal
-# Access via: UtilityRegistry.get_builtin_triggers()
-__all__: list[str] = []