digitalkin 0.3.2.dev2__py3-none-any.whl → 0.3.2.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
digitalkin/__version__.py CHANGED
@@ -5,4 +5,4 @@ from importlib.metadata import PackageNotFoundError, version
 try:
     __version__ = version("digitalkin")
 except PackageNotFoundError:
-    __version__ = "0.3.2.dev2"
+    __version__ = "0.3.2.dev4"
@@ -15,8 +15,8 @@ from digitalkin.core.task_manager.local_task_manager import LocalTaskManager
 from digitalkin.core.task_manager.task_session import TaskSession
 from digitalkin.logger import logger
 from digitalkin.models.core.task_monitor import TaskStatus
+from digitalkin.models.module.base_types import InputModelT, OutputModelT, SetupModelT
 from digitalkin.models.module.module import ModuleCodeModel
-from digitalkin.models.module.module_types import InputModelT, OutputModelT, SetupModelT
 from digitalkin.modules._base_module import BaseModule
 from digitalkin.services.services_models import ServicesMode
 
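The generic type variables InputModelT, OutputModelT, and SetupModelT now come from the new digitalkin.models.module.base_types module (added at the end of this diff) rather than from digitalkin.models.module.module_types.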
@@ -86,7 +86,10 @@ class SingleJobManager(BaseJobManager[InputModelT, OutputModelT, SetupModelT]):
                 message=f"Module {job_id} did not respond within 30 seconds",
             )
         finally:
-            logger.info(f"{job_id=}: {session.queue.empty()}")
+            logger.debug(
+                "Config setup response retrieved",
+                extra={"job_id": job_id, "queue_empty": session.queue.empty()},
+            )
 
     async def create_config_setup_instance_job(
         self,
@@ -126,7 +129,7 @@ class SingleJobManager(BaseJobManager[InputModelT, OutputModelT, SetupModelT]):
         except Exception:
             # Remove the module from the manager in case of an error.
             del self.tasks_sessions[job_id]
-            logger.exception("Failed to start module %s: %s", job_id)
+            logger.exception("Failed to start module", extra={"job_id": job_id})
             raise
         else:
             return job_id
@@ -140,7 +143,8 @@ class SingleJobManager(BaseJobManager[InputModelT, OutputModelT, SetupModelT]):
             job_id: The unique identifier of the job.
             output_data: The output data produced by the job.
         """
-        await self.tasks_sessions[job_id].queue.put(output_data.model_dump())
+        session = self.tasks_sessions[job_id]
+        await session.queue.put(output_data.model_dump())
 
     @asynccontextmanager  # type: ignore
     async def generate_stream_consumer(self, job_id: str) -> AsyncIterator[AsyncGenerator[dict[str, Any], None]]:  # type: ignore
@@ -259,6 +263,18 @@ class SingleJobManager(BaseJobManager[InputModelT, OutputModelT, SetupModelT]):
         logger.info("Managed task started: '%s'", job_id, extra={"task_id": job_id})
         return job_id
 
+    async def clean_session(self, task_id: str, mission_id: str) -> bool:
+        """Clean a task's session.
+
+        Args:
+            task_id: Unique identifier for the task.
+            mission_id: Mission identifier.
+
+        Returns:
+            bool: True if the task was successfully cleaned, False otherwise.
+        """
+        return await self._task_manager.clean_session(task_id, mission_id)
+
     async def stop_module(self, job_id: str) -> bool:
         """Stop a running module job.
 
@@ -271,20 +287,23 @@ class SingleJobManager(BaseJobManager[InputModelT, OutputModelT, SetupModelT]):
         Raises:
             Exception: If an error occurs while stopping the module.
         """
-        logger.info(f"STOP required for {job_id=}")
+        logger.info("Stop module requested", extra={"job_id": job_id})
 
         async with self._lock:
             session = self.tasks_sessions.get(job_id)
 
             if not session:
-                logger.warning(f"session with id: {job_id} not found")
+                logger.warning("Session not found", extra={"job_id": job_id})
                 return False
             try:
                 await session.module.stop()
                 await self.cancel_task(job_id, session.mission_id)
-                logger.debug(f"session {job_id} ({session.module.name}) stopped successfully")
-            except Exception as e:
-                logger.error(f"Error while stopping module {job_id}: {e}")
+                logger.debug(
+                    "Module stopped successfully",
+                    extra={"job_id": job_id, "mission_id": session.mission_id},
+                )
+            except Exception:
+                logger.exception("Error stopping module", extra={"job_id": job_id})
                 raise
             else:
                 return True
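A recurring change in these hunks is replacing interpolated f-string log messages with a constant message plus structured fields passed via extra. Whether those fields actually appear in the output depends on the logging configuration; below is a minimal sketch of a formatter that surfaces them, an illustrative assumption rather than the SDK's actual logger setup:

import logging

class ExtraFormatter(logging.Formatter):
    """Append fields passed via `extra` to the formatted message."""

    # Attribute names every LogRecord carries; anything else came from `extra`.
    _STANDARD = set(vars(logging.LogRecord("", 0, "", 0, "", (), None))) | {"message", "asctime"}

    def format(self, record: logging.LogRecord) -> str:
        base = super().format(record)
        fields = {k: v for k, v in vars(record).items() if k not in self._STANDARD}
        return f"{base} {fields}" if fields else base

handler = logging.StreamHandler()
handler.setFormatter(ExtraFormatter("%(levelname)s %(name)s: %(message)s"))
log = logging.getLogger("digitalkin")
log.addHandler(handler)
log.setLevel(logging.DEBUG)

# The message stays constant (easy to group in a log aggregator);
# identifiers travel as structured fields instead of being interpolated.
log.info("Stop module requested", extra={"job_id": "job-123"})
# -> INFO digitalkin: Stop module requested {'job_id': 'job-123'}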
@@ -84,9 +84,12 @@ class TaskSession:
         self._heartbeat_interval = heartbeat_interval
 
         logger.info(
-            "TaskContext initialized for task: '%s'",
-            task_id,
-            extra={"task_id": task_id, "mission_id": mission_id, "heartbeat_interval": heartbeat_interval},
+            "TaskSession initialized",
+            extra={
+                "task_id": task_id,
+                "mission_id": mission_id,
+                "heartbeat_interval": str(heartbeat_interval),
+            },
         )
 
     @property
@@ -99,6 +102,21 @@ class TaskSession:
         """Task paused status."""
         return self._paused.is_set()
 
+    @property
+    def setup_id(self) -> str:
+        """Get setup_id from module context."""
+        return self.module.context.session.setup_id
+
+    @property
+    def setup_version_id(self) -> str:
+        """Get setup_version_id from module context."""
+        return self.module.context.session.setup_version_id
+
+    @property
+    def session_ids(self) -> dict[str, str]:
+        """Get all session IDs from module context for structured logging."""
+        return self.module.context.session.current_ids()
+
     async def send_heartbeat(self) -> bool:
         """Rate-limited heartbeat with connection resilience.
 
@@ -108,6 +126,8 @@ class TaskSession:
         heartbeat = HeartbeatMessage(
             task_id=self.task_id,
             mission_id=self.mission_id,
+            setup_id=self.setup_id,
+            setup_version_id=self.setup_version_id,
             timestamp=datetime.datetime.now(datetime.timezone.utc),
         )
 
@@ -120,23 +140,17 @@ class TaskSession:
                 return True
             except Exception as e:
                 logger.error(
-                    "Heartbeat exception for task: '%s'",
-                    self.task_id,
-                    extra={"task_id": self.task_id, "error": str(e)},
+                    "Heartbeat exception",
+                    extra={**self.session_ids, "error": str(e)},
                     exc_info=True,
                 )
-                logger.error(
-                    "Initial heartbeat failed for task: '%s'",
-                    self.task_id,
-                    extra={"task_id": self.task_id},
-                )
+                logger.error("Initial heartbeat failed", extra=self.session_ids)
                 return False
 
         if (heartbeat.timestamp - self._last_heartbeat) < self._heartbeat_interval:
             logger.debug(
-                "Heartbeat skipped due to rate limiting for task: '%s' | delta=%s",
-                self.task_id,
-                heartbeat.timestamp - self._last_heartbeat,
+                "Heartbeat skipped due to rate limiting",
+                extra={**self.session_ids, "delta": str(heartbeat.timestamp - self._last_heartbeat)},
             )
             return True
 
@@ -147,39 +161,24 @@ class TaskSession:
             return True
         except Exception as e:
             logger.error(
-                "Heartbeat exception for task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id, "error": str(e)},
+                "Heartbeat exception",
+                extra={**self.session_ids, "error": str(e)},
                 exc_info=True,
             )
-            logger.warning(
-                "Heartbeat failed for task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.warning("Heartbeat failed", extra=self.session_ids)
             return False
 
     async def generate_heartbeats(self) -> None:
         """Periodic heartbeat generator with cancellation support."""
-        logger.debug(
-            "Heartbeat generator started for task: '%s'",
-            self.task_id,
-            extra={"task_id": self.task_id, "mission_id": self.mission_id},
-        )
+        logger.debug("Heartbeat generator started", extra=self.session_ids)
         while not self.cancelled:
             logger.debug(
-                "Heartbeat tick for task: '%s', cancelled=%s",
-                self.task_id,
-                self.cancelled,
-                extra={"task_id": self.task_id, "mission_id": self.mission_id},
+                "Heartbeat tick",
+                extra={**self.session_ids, "cancelled": self.cancelled},
             )
             success = await self.send_heartbeat()
             if not success:
-                logger.error(
-                    "Heartbeat failed, cancelling task: '%s'",
-                    self.task_id,
-                    extra={"task_id": self.task_id, "mission_id": self.mission_id},
-                )
+                logger.error("Heartbeat failed, cancelling task", extra=self.session_ids)
                 await self._handle_cancel(CancellationReason.HEARTBEAT_FAILURE)
                 break
             await asyncio.sleep(self._heartbeat_interval.total_seconds())
@@ -187,11 +186,7 @@ class TaskSession:
     async def wait_if_paused(self) -> None:
         """Block execution if task is paused."""
         if self._paused.is_set():
-            logger.info(
-                "Task paused, waiting for resume: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.info("Task paused, waiting for resume", extra=self.session_ids)
             await self._paused.wait()
 
     async def listen_signals(self) -> None:  # noqa: C901
  async def listen_signals(self) -> None: # noqa: C901
@@ -200,18 +195,14 @@ class TaskSession:
200
195
  Raises:
201
196
  CancelledError: Asyncio when task cancelling
202
197
  """
203
- logger.info(
204
- "Signal listener started for task: '%s'",
205
- self.task_id,
206
- extra={"task_id": self.task_id},
207
- )
198
+ logger.info("Signal listener started", extra=self.session_ids)
208
199
  if self.signal_record_id is None:
209
200
  self.signal_record_id = (await self.db.select_by_task_id("tasks", self.task_id)).get("id")
210
201
 
211
202
  live_id, live_signals = await self.db.start_live("tasks")
212
203
  try:
213
204
  async for signal in live_signals:
214
- logger.debug("Signal received for task '%s': %s", self.task_id, signal)
205
+ logger.debug("Signal received", extra={**self.session_ids, "signal": signal})
215
206
  if self.cancelled:
216
207
  break
217
208
 
@@ -228,26 +219,17 @@ class TaskSession:
                 await self._handle_status_request()
 
         except asyncio.CancelledError:
-            logger.debug(
-                "Signal listener cancelled for task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.debug("Signal listener cancelled", extra=self.session_ids)
             raise
         except Exception as e:
             logger.error(
-                "Signal listener fatal error for task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id, "error": str(e)},
+                "Signal listener fatal error",
+                extra={**self.session_ids, "error": str(e)},
                 exc_info=True,
             )
         finally:
             await self.db.stop_live(live_id)
-            logger.info(
-                "Signal listener stopped for task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.info("Signal listener stopped", extra=self.session_ids)
 
     async def _handle_cancel(self, reason: CancellationReason = CancellationReason.UNKNOWN) -> None:
         """Idempotent cancellation with acknowledgment and reason tracking.
@@ -257,13 +239,9 @@ class TaskSession:
         """
         if self.is_cancelled.is_set():
             logger.debug(
-                "Cancel ignored - task already cancelled: '%s' (existing reason: %s, new reason: %s)",
-                self.task_id,
-                self.cancellation_reason.value,
-                reason.value,
+                "Cancel ignored - already cancelled",
                 extra={
-                    "task_id": self.task_id,
-                    "mission_id": self.mission_id,
+                    **self.session_ids,
                     "existing_reason": self.cancellation_reason.value,
                     "new_reason": reason.value,
                 },
@@ -277,25 +255,13 @@ class TaskSession:
         # Log with appropriate level based on reason
         if reason in {CancellationReason.SUCCESS_CLEANUP, CancellationReason.FAILURE_CLEANUP}:
             logger.debug(
-                "Task cancelled (cleanup): '%s', reason: %s",
-                self.task_id,
-                reason.value,
-                extra={
-                    "task_id": self.task_id,
-                    "mission_id": self.mission_id,
-                    "cancellation_reason": reason.value,
-                },
+                "Task cancelled (cleanup)",
+                extra={**self.session_ids, "cancellation_reason": reason.value},
             )
         else:
             logger.info(
-                "Task cancelled: '%s', reason: %s",
-                self.task_id,
-                reason.value,
-                extra={
-                    "task_id": self.task_id,
-                    "mission_id": self.mission_id,
-                    "cancellation_reason": reason.value,
-                },
+                "Task cancelled",
+                extra={**self.session_ids, "cancellation_reason": reason.value},
             )
 
         # Resume if paused so cancellation can proceed
@@ -308,6 +274,8 @@ class TaskSession:
             SignalMessage(
                 task_id=self.task_id,
                 mission_id=self.mission_id,
+                setup_id=self.setup_id,
+                setup_version_id=self.setup_version_id,
                 action=SignalType.ACK_CANCEL,
                 status=self.status,
             ).model_dump(),
@@ -316,11 +284,7 @@ class TaskSession:
     async def _handle_pause(self) -> None:
         """Pause task execution."""
         if not self._paused.is_set():
-            logger.info(
-                "Pausing task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.info("Task paused", extra=self.session_ids)
             self._paused.set()
 
         await self.db.update(
@@ -329,6 +293,8 @@ class TaskSession:
             SignalMessage(
                 task_id=self.task_id,
                 mission_id=self.mission_id,
+                setup_id=self.setup_id,
+                setup_version_id=self.setup_version_id,
                 action=SignalType.ACK_PAUSE,
                 status=self.status,
             ).model_dump(),
@@ -337,11 +303,7 @@ class TaskSession:
     async def _handle_resume(self) -> None:
         """Resume paused task."""
         if self._paused.is_set():
-            logger.info(
-                "Resuming task: '%s'",
-                self.task_id,
-                extra={"task_id": self.task_id},
-            )
+            logger.info("Task resumed", extra=self.session_ids)
             self._paused.clear()
 
         await self.db.update(
@@ -350,6 +312,8 @@ class TaskSession:
             SignalMessage(
                 task_id=self.task_id,
                 mission_id=self.mission_id,
+                setup_id=self.setup_id,
+                setup_version_id=self.setup_version_id,
                 action=SignalType.ACK_RESUME,
                 status=self.status,
             ).model_dump(),
@@ -361,18 +325,16 @@ class TaskSession:
             "tasks",
             self.signal_record_id,  # type: ignore
             SignalMessage(
-                mission_id=self.mission_id,
                 task_id=self.task_id,
+                mission_id=self.mission_id,
+                setup_id=self.setup_id,
+                setup_version_id=self.setup_version_id,
                 status=self.status,
                 action=SignalType.ACK_STATUS,
             ).model_dump(),
         )
 
-        logger.debug(
-            "Status report sent for task: '%s'",
-            self.task_id,
-            extra={"task_id": self.task_id},
-        )
+        logger.debug("Status report sent", extra=self.session_ids)
 
     async def cleanup(self) -> None:
         """Clean up task session resources.
@@ -19,6 +19,7 @@ from digitalkin.logger import logger
 from digitalkin.models.core.job_manager_models import JobManagerMode
 from digitalkin.models.module.module import ModuleStatus
 from digitalkin.modules._base_module import BaseModule
+from digitalkin.services.registry import GrpcRegistry, RegistryStrategy
 from digitalkin.services.services_models import ServicesMode
 from digitalkin.services.setup.default_setup import DefaultSetup
 from digitalkin.services.setup.grpc_setup import GrpcSetup
@@ -40,6 +41,7 @@ class ModuleServicer(module_service_pb2_grpc.ModuleServiceServicer, ArgParser):
     args: Namespace
     setup: SetupStrategy
     job_manager: BaseJobManager
+    _registry_cache: RegistryStrategy | None = None
 
     def _add_parser_args(self, parser: ArgumentParser) -> None:
         super()._add_parser_args(parser)
@@ -82,6 +84,26 @@ class ModuleServicer(module_service_pb2_grpc.ModuleServiceServicer, ArgParser):
         )
         self.setup = GrpcSetup() if self.args.services_mode == ServicesMode.REMOTE else DefaultSetup()
 
+    def _get_registry(self) -> RegistryStrategy | None:
+        """Get a cached registry instance if configured.
+
+        Returns:
+            Cached GrpcRegistry instance if registry config exists, None otherwise.
+        """
+        if self._registry_cache is not None:
+            return self._registry_cache
+
+        registry_config = self.module_class.services_config_params.get("registry")
+        if not registry_config:
+            return None
+
+        client_config = registry_config.get("client_config")
+        if not client_config:
+            return None
+
+        self._registry_cache = GrpcRegistry("", "", "", client_config)
+        return self._registry_cache
+
     async def ConfigSetupModule(  # noqa: N802
         self,
         request: lifecycle_pb2.ConfigSetupModuleRequest,
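Because the servicer typically lives for the whole process, this caches the GrpcRegistry client so it is built once rather than on every ConfigSetupModule call. The three empty-string positional arguments look like placeholder name/address fields that go unused when a client_config is supplied; the GrpcRegistry signature is not shown in this diff, so that reading is an inference.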
@@ -125,6 +147,15 @@ class ModuleServicer(module_service_pb2_grpc.ModuleServiceServicer, ArgParser):
             msg = "No config setup data returned."
             raise ServicerError(msg)
 
+        # Resolve tool references in config_setup_data if registry is configured
+        # This also builds the tool_cache for LLM access during execution
+        registry = self._get_registry()
+        if registry:
+            if hasattr(config_setup_data, "resolve_tool_references"):
+                config_setup_data.resolve_tool_references(registry)
+            if hasattr(config_setup_data, "build_tool_cache"):
+                config_setup_data.build_tool_cache()
+
         # create a task to run the module in background
         job_id = await self.job_manager.create_config_setup_instance_job(
             config_setup_data,
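The hasattr guards make tool resolution opt-in per setup model: only setup classes that define these hooks participate, and everything else passes through untouched. A hypothetical setup model showing the shape of those hooks (the class and its fields are invented for illustration; only the two method names come from the diff):

from pydantic import BaseModel, Field, PrivateAttr

class AssistantSetup(BaseModel):
    """Invented example; not an SDK class."""

    tool_ids: list[str] = Field(default_factory=list)
    resolved_tools: dict[str, str] = Field(default_factory=dict)
    _tool_cache: dict[str, str] = PrivateAttr(default_factory=dict)

    def resolve_tool_references(self, registry: object) -> None:
        # The RegistryStrategy API is not shown in this diff, so this
        # lookup is a placeholder for a real registry query.
        self.resolved_tools = {tool_id: f"<resolved {tool_id}>" for tool_id in self.tool_ids}

    def build_tool_cache(self) -> None:
        # Cache the resolved tools for LLM access during execution.
        self._tool_cache = dict(self.resolved_tools)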
@@ -139,8 +170,8 @@ class ModuleServicer(module_service_pb2_grpc.ModuleServiceServicer, ArgParser):
             return lifecycle_pb2.ConfigSetupModuleResponse(success=False)
 
         updated_setup_data = await self.job_manager.generate_config_setup_module_response(job_id)
-        logger.info("Setup updated")
-        logger.debug(f"Updated setup data: {updated_setup_data=}")
+        logger.info("Setup updated", extra={"job_id": job_id})
+        logger.debug("Updated setup data", extra={"job_id": job_id, "setup_data": updated_setup_data})
         setup_version.content = json_format.ParseDict(
             updated_setup_data,
             struct_pb2.Struct(),
@@ -249,17 +280,19 @@ class ModuleServicer(module_service_pb2_grpc.ModuleServiceServicer, ArgParser):
         Returns:
             A response indicating success or failure.
         """
-        logger.debug("StopModule called for module: '%s'", self.module_class.__name__)
+        logger.debug(
+            "StopModule called",
+            extra={"module_class": self.module_class.__name__, "job_id": request.job_id},
+        )
 
         response: bool = await self.job_manager.stop_module(request.job_id)
         if not response:
-            message = f"Job {request.job_id} not found"
-            logger.warning(message)
+            logger.warning("Job not found for stop request", extra={"job_id": request.job_id})
             context.set_code(grpc.StatusCode.NOT_FOUND)
-            context.set_details(message)
+            context.set_details(f"Job {request.job_id} not found")
             return lifecycle_pb2.StopModuleResponse(success=False)
 
-        logger.debug("Job %s stopped successfully", request.job_id, extra={"job_id": request.job_id})
+        logger.debug("Job stopped successfully", extra={"job_id": request.job_id})
         return lifecycle_pb2.StopModuleResponse(success=True)
 
     async def GetModuleStatus(  # noqa: N802
@@ -55,6 +55,8 @@ class SignalMessage(BaseModel):
 
     task_id: str = Field(..., description="Unique identifier for the task")
     mission_id: str = Field(..., description="Identifier for the mission")
+    setup_id: str = Field(default="", description="Identifier for the setup")
+    setup_version_id: str = Field(default="", description="Identifier for the setup version")
     status: TaskStatus = Field(..., description="Current status of the task")
     action: SignalType = Field(..., description="Type of signal action")
     timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
@@ -67,4 +69,6 @@ class HeartbeatMessage(BaseModel):
 
     task_id: str = Field(..., description="Unique identifier for the task")
     mission_id: str = Field(..., description="Identifier for the mission")
+    setup_id: str = Field(default="", description="Identifier for the setup")
+    setup_version_id: str = Field(default="", description="Identifier for the setup version")
     timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
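Both models give the new fields empty-string defaults, so existing producers that pass only task_id and mission_id keep validating. A quick sketch; the import path is assumed from the TaskStatus import seen earlier in this diff:

from digitalkin.models.core.task_monitor import HeartbeatMessage  # assumed path

legacy = HeartbeatMessage(task_id="task-1", mission_id="mission-1")
print(legacy.setup_id)  # "" - old callers keep working

tagged = HeartbeatMessage(
    task_id="task-1",
    mission_id="mission-1",
    setup_id="setup-9",
    setup_version_id="setup-9-v2",
)
print(tagged.model_dump()["setup_version_id"])  # setup-9-v2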
@@ -6,20 +6,28 @@ from digitalkin.models.module.module_types import (
     DataTrigger,
     SetupModel,
 )
+from digitalkin.models.module.tool_reference import (
+    ToolReference,
+    ToolReferenceConfig,
+    ToolSelectionMode,
+)
 from digitalkin.models.module.utility import (
     EndOfStreamOutput,
+    ModuleStartInfoOutput,
     UtilityProtocol,
     UtilityRegistry,
 )
 
 __all__ = [
-    # Core types (used by all SDK users)
     "DataModel",
     "DataTrigger",
-    # Utility (commonly used)
     "EndOfStreamOutput",
     "ModuleContext",
+    "ModuleStartInfoOutput",
     "SetupModel",
+    "ToolReference",
+    "ToolReferenceConfig",
+    "ToolSelectionMode",
     "UtilityProtocol",
     "UtilityRegistry",
 ]
@@ -0,0 +1,61 @@
+"""Base types for module models."""
+
+from __future__ import annotations
+
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, ClassVar, Generic, TypeVar
+
+from pydantic import BaseModel, Field
+
+if TYPE_CHECKING:
+    from digitalkin.models.module.setup_types import SetupModel
+
+
+class DataTrigger(BaseModel):
+    """Defines the root input/output model exposing the protocol.
+
+    The mandatory protocol is important to define the module behavior following the user or agent input/output.
+
+    Example:
+        class MyInput(DataModel):
+            root: DataTrigger
+            user_define_data: Any
+
+        # Usage
+        my_input = MyInput(root=DataTrigger(protocol="message"))
+        print(my_input.root.protocol)  # Output: message
+    """
+
+    protocol: ClassVar[str]
+    created_at: str = Field(
+        default_factory=lambda: datetime.now(tz=timezone.utc).isoformat(),
+        title="Created At",
+        description="Timestamp when the payload was created.",
+    )
+
+
+DataTriggerT = TypeVar("DataTriggerT", bound=DataTrigger)
+
+
+class DataModel(BaseModel, Generic[DataTriggerT]):
+    """Base definition of an input/output model showing the mandatory root fields.
+
+    The model defines the module input/output, usually referring to multiple input/output types combined in a union.
+
+    Example:
+        class ModuleInput(DataModel):
+            root: FileInput | MessageInput
+    """
+
+    root: DataTriggerT
+    annotations: dict[str, str] = Field(
+        default={},
+        title="Annotations",
+        description="Additional metadata or annotations related to the output. ex {'role': 'user'}",
+    )
+
+
+InputModelT = TypeVar("InputModelT", bound=DataModel)
+OutputModelT = TypeVar("OutputModelT", bound=DataModel)
+SecretModelT = TypeVar("SecretModelT", bound=BaseModel)
+SetupModelT = TypeVar("SetupModelT", bound="SetupModel")
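A short usage sketch of the two base classes above; MessageTrigger and ChatInput are invented names, and the import path assumes this file lands at digitalkin/models/module/base_types.py (consistent with the import added earlier in this diff):

from typing import ClassVar

from digitalkin.models.module.base_types import DataModel, DataTrigger

class MessageTrigger(DataTrigger):
    protocol: ClassVar[str] = "message"
    text: str

class ChatInput(DataModel[MessageTrigger]):
    pass

payload = ChatInput(root=MessageTrigger(text="hello"), annotations={"role": "user"})
print(payload.root.protocol)    # message
print(payload.root.created_at)  # ISO-8601 UTC timestamp from the default_factory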