digitalkin 0.2.12__py3-none-any.whl → 0.2.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. digitalkin/__version__.py +1 -1
  2. digitalkin/grpc_servers/_base_server.py +15 -17
  3. digitalkin/grpc_servers/module_server.py +9 -10
  4. digitalkin/grpc_servers/module_servicer.py +107 -85
  5. digitalkin/grpc_servers/registry_server.py +3 -6
  6. digitalkin/grpc_servers/registry_servicer.py +18 -19
  7. digitalkin/grpc_servers/utils/grpc_client_wrapper.py +3 -5
  8. digitalkin/logger.py +45 -1
  9. digitalkin/models/module/module.py +1 -0
  10. digitalkin/modules/_base_module.py +44 -5
  11. digitalkin/modules/job_manager/base_job_manager.py +139 -0
  12. digitalkin/modules/job_manager/job_manager_models.py +44 -0
  13. digitalkin/modules/job_manager/single_job_manager.py +218 -0
  14. digitalkin/modules/job_manager/taskiq_broker.py +173 -0
  15. digitalkin/modules/job_manager/taskiq_job_manager.py +213 -0
  16. digitalkin/services/cost/default_cost.py +8 -4
  17. digitalkin/services/cost/grpc_cost.py +15 -7
  18. digitalkin/services/filesystem/default_filesystem.py +2 -4
  19. digitalkin/services/filesystem/grpc_filesystem.py +8 -5
  20. digitalkin/services/setup/__init__.py +1 -0
  21. digitalkin/services/setup/default_setup.py +10 -12
  22. digitalkin/services/setup/grpc_setup.py +8 -10
  23. digitalkin/services/storage/default_storage.py +11 -5
  24. digitalkin/services/storage/grpc_storage.py +23 -8
  25. digitalkin/utils/arg_parser.py +5 -48
  26. digitalkin/utils/development_mode_action.py +51 -0
  27. {digitalkin-0.2.12.dist-info → digitalkin-0.2.13.dist-info}/METADATA +42 -11
  28. {digitalkin-0.2.12.dist-info → digitalkin-0.2.13.dist-info}/RECORD +35 -28
  29. {digitalkin-0.2.12.dist-info → digitalkin-0.2.13.dist-info}/WHEEL +1 -1
  30. modules/cpu_intensive_module.py +271 -0
  31. modules/minimal_llm_module.py +200 -56
  32. modules/storage_module.py +5 -6
  33. modules/text_transform_module.py +1 -1
  34. digitalkin/modules/job_manager.py +0 -177
  35. {digitalkin-0.2.12.dist-info → digitalkin-0.2.13.dist-info}/licenses/LICENSE +0 -0
  36. {digitalkin-0.2.12.dist-info → digitalkin-0.2.13.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,271 @@
1
+ """Simple module calling an LLM."""
2
+
3
+ import logging
4
+ from collections.abc import Callable
5
+ from typing import Any, ClassVar, Literal
6
+
7
+ from pydantic import BaseModel, Field
8
+
9
+ from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerConfig, ServerMode
10
+ from digitalkin.modules._base_module import BaseModule
11
+ from digitalkin.services.services_models import ServicesStrategy
12
+ from digitalkin.services.setup.setup_strategy import SetupData
13
+
14
# Root-logger configuration: timestamped, named, leveled records so every
# module logger in this process shares one readable output format.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    level=logging.DEBUG,
)

# Module-scoped logger used throughout this file.
logger = logging.getLogger(__name__)
20
+
21
+
22
class MessageInputPayload(BaseModel):
    """Discriminated "message" input payload carrying a raw user prompt."""

    # Discriminator value used by CPUInput to select this payload variant.
    payload_type: Literal["message"] = "message"
    # Free-form text supplied by the caller.
    user_prompt: str = Field(
        ...,
        description="The prompt provided by the user for processing.",
        title="User Prompt",
    )
31
+
32
+
33
class InputFile(BaseModel):
    """A single file submitted to the module for processing."""

    # Human-readable file name.
    name: str = Field(
        ...,
        description="The name of the file to be processed.",
        title="File Name",
    )
    # Raw file bytes.
    content: bytes = Field(
        ...,
        description="The content of the file to be processed.",
        title="File Content",
    )
    # Declared file type (e.g. a MIME type or extension — not validated here).
    file_type: str = Field(
        ...,
        description="The type of the file to be processed.",
        title="File Type",
    )
52
+
53
+
54
class FileInputPayload(BaseModel):
    """Discriminated "file" input payload wrapping a batch of files."""

    # Discriminator value used by CPUInput to select this payload variant.
    payload_type: Literal["file"] = "file"
    # One or more files to hand to the module.
    files: list[InputFile] = Field(
        ...,
        description="List of files to be processed.",
        title="Files",
    )
63
+
64
+
65
class CPUInput(BaseModel):
    """Top-level input schema: a tagged union of message or file payloads."""

    # pydantic resolves the concrete payload class via the payload_type tag.
    payload: MessageInputPayload | FileInputPayload = Field(
        ...,
        description="Either a message or list of file input.",
        discriminator="payload_type",
        title="Payload",
    )
74
+
75
+
76
class MessageOutputPayload(BaseModel):
    """Discriminated "message" output payload carrying the assistant reply."""

    # Discriminator value used by CPUOutput to select this payload variant.
    payload_type: Literal["message"] = "message"
    # Text produced in response to the user's prompt.
    user_response: str = Field(
        ...,
        description="The response generated by the assistant based on the user prompt.",
        title="User Response",
    )
85
+
86
+
87
class OutputFile(BaseModel):
    """A single file produced by the module, referenced by URL."""

    # Human-readable file name.
    name: str = Field(
        ...,
        description="The name of the file to be processed.",
        title="File Name",
    )
    # Location of the produced file; explicitly required but nullable.
    url: str | None = Field(
        ...,
        description="The URL of the file to be processed.",
        title="File URL",
    )
    # Optional free-form note attached to the file.
    message: str | None = Field(
        None,
        description="Optional message associated with the file.",
        title="Message",
    )
106
+
107
+
108
class FileOutputPayload(BaseModel):
    """Discriminated "file" output payload wrapping generated files."""

    # Discriminator value used by CPUOutput to select this payload variant.
    payload_type: Literal["file"] = "file"
    # Files produced by the module run.
    files: list[OutputFile] = Field(
        ...,
        description="List of files generated by the assistant.",
        title="Files",
    )
117
+
118
+
119
class CPUOutput(BaseModel):
    """Top-level output schema: a tagged union of message or file payloads."""

    # pydantic resolves the concrete payload class via the payload_type tag.
    payload: MessageOutputPayload | FileOutputPayload = Field(
        ...,
        description="Either a message or file response.",
        discriminator="payload_type",
        title="Payload",
    )
128
+
129
+
130
class CPUSetup(BaseModel):
    """Module configuration: model selection and generation parameters."""

    # Required: which model to run.
    model_name: str = Field(
        ...,
        description="The name of the CPU model to use for processing.",
        title="Model Name",
    )
    # Required: system-prompt replacement controlling assistant behavior.
    developer_prompt: str = Field(
        ...,
        description="The developer prompt new versions of system prompt, it defines the behavior of the assistant.",
        title="Developer Prompt",
    )
    # Sampling randomness; defaults to 0.7.
    temperature: float = Field(
        0.7,
        description="Controls the randomness of the model's output. Higher values make output more random.",
        title="Temperature",
    )
    # Response length cap; defaults to 100 tokens.
    max_tokens: int = Field(
        100,
        description="The maximum number of tokens to generate in the response.",
        title="Max Tokens",
    )
153
+
154
+
155
class CPUToolSecret(BaseModel):
    """Secret model defining module configuration parameters.

    Intentionally empty: this module requires no secret values, but the
    BaseModule generic signature still expects a secret schema class.
    """
157
+
158
+
159
# gRPC server endpoint for this module: async, unencrypted, listening on
# all IPv6/IPv4 interfaces at port 50151.
server_config = ServerConfig(
    credentials=None,
    host="[::]",
    max_workers=10,
    mode=ServerMode.ASYNC,
    port=50151,
    security=SecurityMode.INSECURE,
)
167
+
168
+
169
# Client configuration mirroring server_config (same host/port/mode) so the
# module's services can reach the local gRPC endpoint.
client_config = ClientConfig(
    credentials=None,
    host="[::]",
    mode=ServerMode.ASYNC,
    port=50151,
    security=SecurityMode.INSECURE,
)
176
+
177
+
178
class CPUIntensiveModule(BaseModule[CPUInput, CPUOutput, CPUSetup, CPUToolSecret]):
    """A CPU-intensive demonstration module.

    Interprets the incoming message payload's ``user_prompt`` as an
    iteration count, computes a running sum of squares, and streams
    intermediate results back through the provided callback.
    """

    name = "CPUIntensiveModule"
    description = "A module that interacts with CPU API to process text"

    # Define the schema formats for the module
    input_format = CPUInput
    output_format = CPUOutput
    setup_format = CPUSetup
    secret_format = CPUToolSecret

    # Define module metadata for discovery
    metadata: ClassVar[dict[str, Any]] = {
        "name": "CPUIntensiveModule",
        "description": "Transforms input text using a streaming LLM response.",
        "version": "1.0.0",
        "tags": ["text", "transformation", "encryption", "streaming"],
    }
    # Define services_config_params with default values; every service
    # shares the module-level client_config.
    services_config_strategies: ClassVar[dict[str, ServicesStrategy | None]] = {}
    services_config_params: ClassVar[dict[str, dict[str, Any | None] | None]] = {
        "storage": {
            "config": {"chat_history": None},
            "client_config": client_config,
        },
        "filesystem": {
            "config": {},
            "client_config": client_config,
        },
        "cost": {
            "config": {},
            "client_config": client_config,
        },
    }

    async def initialize(self, setup_data: SetupData) -> None:
        """Initialize the module capabilities.

        This method is called when the module is loaded by the server.
        This module needs no per-instance resources, so the hook is a no-op.
        """

    async def run(
        self,
        input_data: CPUInput,
        setup_data: CPUSetup,
        callback: Callable,
    ) -> None:
        """Run the CPU-intensive computation and stream progress.

        Args:
            input_data: Input data for the module; the message payload's
                ``user_prompt`` must be an integer literal giving the
                number of iterations to perform.
            setup_data: Setup data for the module (validated, otherwise unused).
            callback: Awaitable callback used to report intermediate output.

        Raises:
            ValueError: If the payload is missing or malformed, if the
                payload type is not "message", or if the prompt is not an
                integer literal.
        """
        # Validate the input data against the declared schemas.
        input_model = self.input_format.model_validate(input_data)
        self.setup_format.model_validate(setup_data)
        logger.debug("Running with input data: %s", input_model)

        if not hasattr(input_model, "payload"):
            error_msg = "Input data is missing 'payload' field"
            raise ValueError(error_msg)

        if not hasattr(input_model.payload, "payload_type"):
            error_msg = "Input payload is missing 'type' field"
            raise ValueError(error_msg)

        # This module only handles "message" payloads; fail loudly instead
        # of mis-validating a "file" payload as a message.
        if input_model.payload.payload_type != "message":
            error_msg = f"Unknown input type '{input_model.payload.payload_type}'. Expected 'message'."
            raise ValueError(error_msg)

        prompt = MessageInputPayload.model_validate(input_model.payload).user_prompt
        try:
            # The prompt encodes the iteration count; convert once instead
            # of re-parsing it inside the loop condition every iteration.
            iterations = int(prompt)
        except ValueError:
            error_msg = f"Expected an integer prompt, got {prompt!r}"
            raise ValueError(error_msg) from None

        total = 0
        for i in range(iterations):
            total += i * i
            # Report progress every 100 iterations and on the final one.
            if i % 100 == 0 or i == iterations - 1:
                message_output_payload = MessageOutputPayload(
                    payload_type="message",
                    user_response=f"result iteration {i}: {total}",
                )
                output_model = self.output_format.model_validate({"payload": message_output_payload})
                await callback(output_data=output_model)
        logger.info("Job %s completed", self.job_id)

    async def cleanup(self) -> None:
        """Clean up any resources when the module is stopped.

        This method is called when the module is being shut down; this
        module holds no external resources, so only a log line is emitted.
        """
        logger.info("Cleaning up module %s", self.metadata["name"])
        # Release any resources here if needed.
@@ -1,44 +1,157 @@
1
1
  """Simple module calling an LLM."""
2
2
 
3
3
  import logging
4
+ import os
4
5
  from collections.abc import Callable
5
- from typing import Any, ClassVar
6
+ from typing import Any, ClassVar, Literal
6
7
 
7
- import grpc
8
8
  import openai
9
- from pydantic import BaseModel
9
+ from pydantic import BaseModel, Field
10
10
 
11
- from digitalkin.grpc_servers.utils.models import SecurityMode, ClientConfig, ServerMode
11
+ from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
12
12
  from digitalkin.modules._base_module import BaseModule
13
+ from digitalkin.services.services_models import ServicesStrategy
13
14
  from digitalkin.services.setup.setup_strategy import SetupData
14
15
 
15
16
  # Configure logging with clear formatting
16
17
  logging.basicConfig(
17
- level=logging.INFO,
18
+ level=logging.DEBUG,
18
19
  format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
19
20
  )
20
21
  logger = logging.getLogger(__name__)
21
22
 
22
23
 
23
- # Define schema models using Pydantic
24
- class OpenAIToolInput(BaseModel):
24
+ class MessageInputPayload(BaseModel):
25
+ """Message trigger model for the OpenAI Archetype module."""
26
+
27
+ payload_type: Literal["message"] = "message"
28
+ user_prompt: str = Field(
29
+ ...,
30
+ title="User Prompt",
31
+ description="The prompt provided by the user for processing.",
32
+ )
33
+
34
+
35
+ class InputFile(BaseModel):
36
+ """File model for the OpenAI Archetype module."""
37
+
38
+ name: str = Field(
39
+ ...,
40
+ title="File Name",
41
+ description="The name of the file to be processed.",
42
+ )
43
+ content: bytes = Field(
44
+ ...,
45
+ title="File Content",
46
+ description="The content of the file to be processed.",
47
+ )
48
+
49
+ file_type: str = Field(
50
+ ...,
51
+ title="File Type",
52
+ description="The type of the file to be processed.",
53
+ )
54
+
55
+
56
+ class FileInputPayload(BaseModel):
57
+ """File input model for the OpenAI Archetype module."""
58
+
59
+ payload_type: Literal["file"] = "file"
60
+ files: list[InputFile] = Field(
61
+ ...,
62
+ title="Files",
63
+ description="List of files to be processed.",
64
+ )
65
+
66
+
67
+ class OpenAIInput(BaseModel):
25
68
  """Input model defining what data the module expects."""
26
69
 
27
- prompt: str
70
+ payload: MessageInputPayload | FileInputPayload = Field(
71
+ ...,
72
+ discriminator="payload_type",
73
+ title="Payload",
74
+ description="Either a message or list of file input.",
75
+ )
76
+
77
+
78
+ class MessageOutputPayload(BaseModel):
79
+ """Message output model for the OpenAI Archetype module."""
80
+
81
+ payload_type: Literal["message"] = "message"
82
+ user_response: str = Field(
83
+ ...,
84
+ title="User Response",
85
+ description="The response generated by the assistant based on the user prompt.",
86
+ )
87
+
88
+
89
+ class OutputFile(BaseModel):
90
+ """File model for the OpenAI Archetype module."""
91
+
92
+ name: str = Field(
93
+ ...,
94
+ title="File Name",
95
+ description="The name of the file to be processed.",
96
+ )
97
+ url: str | None = Field(
98
+ ...,
99
+ title="File URL",
100
+ description="The URL of the file to be processed.",
101
+ )
102
+
103
+ message: str | None = Field(
104
+ None,
105
+ title="Message",
106
+ description="Optional message associated with the file.",
107
+ )
108
+
109
+
110
+ class FileOutputPayload(BaseModel):
111
+ """File output model for the OpenAI Archetype module."""
112
+
113
+ payload_type: Literal["file"] = "file"
114
+ files: list[OutputFile] = Field(
115
+ ...,
116
+ title="Files",
117
+ description="List of files generated by the assistant.",
118
+ )
28
119
 
29
120
 
30
- class OpenAIToolOutput(BaseModel):
121
+ class OpenAIOutput(BaseModel):
31
122
  """Output model defining what data the module produces."""
32
123
 
33
- response: str
124
+ payload: MessageOutputPayload | FileOutputPayload = Field(
125
+ ...,
126
+ discriminator="payload_type",
127
+ title="Payload",
128
+ description="Either a message or file response.",
129
+ )
34
130
 
35
131
 
36
- class OpenAIToolSetup(BaseModel):
132
+ class OpenAISetup(BaseModel):
37
133
  """Setup model defining module configuration parameters."""
38
134
 
39
- openai_key: str
40
- model_name: str
41
- dev_prompt: str
135
+ model_name: str = Field(
136
+ ...,
137
+ title="Model Name",
138
+ description="The name of the OpenAI model to use for processing.",
139
+ )
140
+ developer_prompt: str = Field(
141
+ ...,
142
+ title="Developer Prompt",
143
+ description="The developer prompt new versions of system prompt, it defines the behavior of the assistant.",
144
+ )
145
+ temperature: float = Field(
146
+ 0.7,
147
+ title="Temperature",
148
+ description="Controls the randomness of the model's output. Higher values make output more random.",
149
+ )
150
+ max_tokens: int = Field(
151
+ 100,
152
+ title="Max Tokens",
153
+ description="The maximum number of tokens to generate in the response.",
154
+ )
42
155
 
43
156
 
44
157
  class OpenAIToolSecret(BaseModel):
@@ -54,38 +167,42 @@ client_config = ClientConfig(
54
167
  )
55
168
 
56
169
 
57
- class OpenAIToolModule(BaseModule[OpenAIToolInput, OpenAIToolOutput, OpenAIToolSetup, OpenAIToolSecret]):
170
+ class OpenAIToolModule(BaseModule[OpenAIInput, OpenAIOutput, OpenAISetup, OpenAIToolSecret]):
58
171
  """A openAI endpoint tool module module."""
59
172
 
60
173
  name = "OpenAIToolModule"
61
174
  description = "A module that interacts with OpenAI API to process text"
62
175
 
63
176
  # Define the schema formats for the module
64
- input_format = OpenAIToolInput
65
- output_format = OpenAIToolOutput
66
- setup_format = OpenAIToolSetup
177
+ input_format = OpenAIInput
178
+ output_format = OpenAIOutput
179
+ setup_format = OpenAISetup
67
180
  secret_format = OpenAIToolSecret
68
181
 
69
182
  openai_client: openai.OpenAI
70
183
 
71
184
  # Define module metadata for discovery
72
185
  metadata: ClassVar[dict[str, Any]] = {
73
- "name": "Minimal_LLM_Tool",
186
+ "name": "OpenAIToolModule",
74
187
  "description": "Transforms input text using a streaming LLM response.",
75
188
  "version": "1.0.0",
76
189
  "tags": ["text", "transformation", "encryption", "streaming"],
77
190
  }
78
191
  # Define services_config_params with default values
79
- services_config_strategies = {}
80
- services_config_params = {
192
+ services_config_strategies: ClassVar[dict[str, ServicesStrategy | None]] = {}
193
+ services_config_params: ClassVar[dict[str, dict[str, Any | None] | None]] = {
81
194
  "storage": {
82
- "config": {"setups": OpenAIToolSetup},
195
+ "config": {"setups": OpenAISetup},
83
196
  "client_config": client_config,
84
197
  },
85
198
  "filesystem": {
86
199
  "config": {},
87
200
  "client_config": client_config,
88
201
  },
202
+ "cost": {
203
+ "config": {},
204
+ "client_config": client_config,
205
+ },
89
206
  }
90
207
 
91
208
  async def initialize(self, setup_data: SetupData) -> None:
@@ -94,7 +211,7 @@ class OpenAIToolModule(BaseModule[OpenAIToolInput, OpenAIToolOutput, OpenAIToolS
94
211
  This method is called when the module is loaded by the server.
95
212
  Use it to set up module-specific resources or configurations.
96
213
  """
97
- self.openai_client = openai.OpenAI(api_key=setup_data.current_setup_version.content["openai_key"])
214
+ self.client: openai.AsyncOpenAI = openai.AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
98
215
  # Define what capabilities this module provides
99
216
  self.capabilities = ["text-processing", "streaming", "transformation"]
100
217
  logger.info(
@@ -103,10 +220,10 @@ class OpenAIToolModule(BaseModule[OpenAIToolInput, OpenAIToolOutput, OpenAIToolS
103
220
  self.capabilities,
104
221
  )
105
222
 
106
- async def run(
223
+ async def run_message(
107
224
  self,
108
- input_data: dict[str, Any],
109
- setup_data: SetupData,
225
+ input_model: MessageInputPayload,
226
+ setup_model: OpenAISetup,
110
227
  callback: Callable,
111
228
  ) -> None:
112
229
  """Process input text and stream LLM responses.
@@ -122,37 +239,64 @@ class OpenAIToolModule(BaseModule[OpenAIToolInput, OpenAIToolOutput, OpenAIToolS
122
239
  openai.APIConnectionError: If an API connection error occurs.
123
240
  Exception: For any unexpected runtime errors.
124
241
  """
125
- logger.info(
126
- "Running job %s with prompt: '%s' on model: %s",
127
- self.job_id,
128
- input_data["prompt"],
129
- setup_data.current_setup_version.content["model_name"],
242
+ # response = await self.client.responses.create(
243
+ # model=setup_model.model_name,
244
+ # instructions=setup_model.developer_prompt,
245
+ # temperature=setup_model.temperature,
246
+ # max_output_tokens=setup_model.max_tokens,
247
+ # input=input_model.user_prompt,
248
+ # )
249
+ # logger.info("Recieved answer from OpenAI: %s", response)
250
+
251
+ # Get and save the output data
252
+ message_output_payload = MessageOutputPayload(
253
+ payload_type="message",
254
+ user_response="Mock data",
255
+ # user_response=response.output_text,
130
256
  )
131
- try:
132
- response = self.openai_client.responses.create(
133
- model=setup_data.current_setup_version.content["model_name"],
134
- tools=[{"type": "web_search_preview"}],
135
- instructions=setup_data.current_setup_version.content["dev_prompt"],
136
- input=input_data["prompt"],
137
- )
138
- if not response.output_text:
139
- raise openai.APIConnectionError
140
- output_data = OpenAIToolOutput(response=response.output_text).model_dump()
141
-
142
- except openai.AuthenticationError as _:
143
- message = "Authentication Error, OPENAI auth token was never set."
144
- logger.exception(message)
145
- output_data = {
146
- "error": {
147
- "code": grpc.StatusCode.UNAUTHENTICATED,
148
- "error_message": message,
149
- }
150
- }
151
- except openai.APIConnectionError as _:
152
- message = "API Error, please try again."
153
- logger.exception(message)
154
- output_data = {"error": {"code": grpc.StatusCode.UNAVAILABLE, "error_message": message}}
155
- await callback(job_id=self.job_id, output_data=output_data)
257
+ output_model = self.output_format.model_validate({"payload": message_output_payload})
258
+ await callback(output_data=output_model)
259
+
260
+ async def run(
261
+ self,
262
+ input_data: OpenAIInput,
263
+ setup_data: OpenAISetup,
264
+ callback: Callable,
265
+ ) -> None:
266
+ """Run the module.
267
+
268
+ Args:
269
+ input_data: Input data for the module
270
+ setup_data: Setup data for the module
271
+ callback: Callback function to report progress
272
+
273
+ Raises:
274
+ ValueError: If the payload type is unknown
275
+ """
276
+ # Validate the input data
277
+ input_model = self.input_format.model_validate(input_data)
278
+ setup_model = self.setup_format.model_validate(setup_data)
279
+ logger.debug("Running with input data: %s", input_model)
280
+
281
+ if not hasattr(input_model, "payload"):
282
+ error_msg = "Input data is missing 'payload' field"
283
+ raise ValueError(error_msg)
284
+
285
+ if not hasattr(input_model.payload, "payload_type"):
286
+ error_msg = "Input payload is missing 'type' field"
287
+ raise ValueError(error_msg)
288
+
289
+ if input_model.payload.payload_type == "message":
290
+ # Validate against MessageInputPayload
291
+ message_payload = MessageInputPayload.model_validate(input_model.payload)
292
+ await self.run_message(message_payload, setup_model, callback)
293
+ elif input_model.payload.payload_type == "file":
294
+ # Validate against FileInputPayload
295
+ file_payload = FileInputPayload.model_validate(input_model.payload)
296
+ await self.run_file(file_payload, setup_model, callback)
297
+ else:
298
+ error_msg = f"Unknown input type '{input_model.payload.payload_type}'. Expected 'message' or 'file'."
299
+ raise ValueError(error_msg)
156
300
  logger.info("Job %s completed", self.job_id)
157
301
 
158
302
  async def cleanup(self) -> None:
modules/storage_module.py CHANGED
@@ -3,7 +3,7 @@
3
3
  import asyncio
4
4
  import datetime
5
5
  from collections.abc import Callable
6
- from typing import Any
6
+ from typing import TYPE_CHECKING, Any
7
7
 
8
8
  from pydantic import BaseModel, Field
9
9
 
@@ -12,7 +12,9 @@ from digitalkin.models.module import ModuleStatus
12
12
  from digitalkin.modules.archetype_module import ArchetypeModule
13
13
  from digitalkin.services.services_config import ServicesConfig
14
14
  from digitalkin.services.services_models import ServicesMode
15
- from digitalkin.services.storage.storage_strategy import StorageRecord
15
+
16
+ if TYPE_CHECKING:
17
+ from digitalkin.services.storage.storage_strategy import StorageRecord
16
18
 
17
19
 
18
20
  class ExampleInput(BaseModel):
@@ -120,10 +122,7 @@ class ExampleModule(ArchetypeModule[ExampleInput, ExampleOutput, ExampleSetup, E
120
122
 
121
123
  # Store the output data in storage
122
124
  storage_id = self.storage.store(
123
- collection="example",
124
- record_id=f"example_outputs",
125
- data=output_data.model_dump(),
126
- data_type="OUTPUT"
125
+ collection="example", record_id="example_outputs", data=output_data.model_dump(), data_type="OUTPUT"
127
126
  )
128
127
 
129
128
  logger.info("Stored output data with ID: %s", storage_id)
@@ -6,7 +6,7 @@ from typing import Any, ClassVar
6
6
 
7
7
  from pydantic import BaseModel
8
8
 
9
- from digitalkin.grpc_servers.utils.models import SecurityMode, ClientConfig, ServerMode
9
+ from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
10
10
  from digitalkin.modules._base_module import BaseModule
11
11
  from digitalkin.services.setup.setup_strategy import SetupData
12
12
  from digitalkin.services.storage.storage_strategy import DataType, StorageRecord