digitalkin 0.3.2.dev2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- base_server/__init__.py +1 -0
- base_server/mock/__init__.py +5 -0
- base_server/mock/mock_pb2.py +39 -0
- base_server/mock/mock_pb2_grpc.py +102 -0
- base_server/server_async_insecure.py +125 -0
- base_server/server_async_secure.py +143 -0
- base_server/server_sync_insecure.py +103 -0
- base_server/server_sync_secure.py +122 -0
- digitalkin/__init__.py +8 -0
- digitalkin/__version__.py +8 -0
- digitalkin/core/__init__.py +1 -0
- digitalkin/core/common/__init__.py +9 -0
- digitalkin/core/common/factories.py +156 -0
- digitalkin/core/job_manager/__init__.py +1 -0
- digitalkin/core/job_manager/base_job_manager.py +288 -0
- digitalkin/core/job_manager/single_job_manager.py +354 -0
- digitalkin/core/job_manager/taskiq_broker.py +311 -0
- digitalkin/core/job_manager/taskiq_job_manager.py +541 -0
- digitalkin/core/task_manager/__init__.py +1 -0
- digitalkin/core/task_manager/base_task_manager.py +539 -0
- digitalkin/core/task_manager/local_task_manager.py +108 -0
- digitalkin/core/task_manager/remote_task_manager.py +87 -0
- digitalkin/core/task_manager/surrealdb_repository.py +266 -0
- digitalkin/core/task_manager/task_executor.py +249 -0
- digitalkin/core/task_manager/task_session.py +406 -0
- digitalkin/grpc_servers/__init__.py +1 -0
- digitalkin/grpc_servers/_base_server.py +486 -0
- digitalkin/grpc_servers/module_server.py +208 -0
- digitalkin/grpc_servers/module_servicer.py +516 -0
- digitalkin/grpc_servers/utils/__init__.py +1 -0
- digitalkin/grpc_servers/utils/exceptions.py +29 -0
- digitalkin/grpc_servers/utils/grpc_client_wrapper.py +88 -0
- digitalkin/grpc_servers/utils/grpc_error_handler.py +53 -0
- digitalkin/grpc_servers/utils/utility_schema_extender.py +97 -0
- digitalkin/logger.py +157 -0
- digitalkin/mixins/__init__.py +19 -0
- digitalkin/mixins/base_mixin.py +10 -0
- digitalkin/mixins/callback_mixin.py +24 -0
- digitalkin/mixins/chat_history_mixin.py +110 -0
- digitalkin/mixins/cost_mixin.py +76 -0
- digitalkin/mixins/file_history_mixin.py +93 -0
- digitalkin/mixins/filesystem_mixin.py +46 -0
- digitalkin/mixins/logger_mixin.py +51 -0
- digitalkin/mixins/storage_mixin.py +79 -0
- digitalkin/models/__init__.py +8 -0
- digitalkin/models/core/__init__.py +1 -0
- digitalkin/models/core/job_manager_models.py +36 -0
- digitalkin/models/core/task_monitor.py +70 -0
- digitalkin/models/grpc_servers/__init__.py +1 -0
- digitalkin/models/grpc_servers/models.py +275 -0
- digitalkin/models/grpc_servers/types.py +24 -0
- digitalkin/models/module/__init__.py +25 -0
- digitalkin/models/module/module.py +40 -0
- digitalkin/models/module/module_context.py +149 -0
- digitalkin/models/module/module_types.py +393 -0
- digitalkin/models/module/utility.py +146 -0
- digitalkin/models/services/__init__.py +10 -0
- digitalkin/models/services/cost.py +54 -0
- digitalkin/models/services/registry.py +42 -0
- digitalkin/models/services/storage.py +44 -0
- digitalkin/modules/__init__.py +11 -0
- digitalkin/modules/_base_module.py +517 -0
- digitalkin/modules/archetype_module.py +23 -0
- digitalkin/modules/tool_module.py +23 -0
- digitalkin/modules/trigger_handler.py +48 -0
- digitalkin/modules/triggers/__init__.py +12 -0
- digitalkin/modules/triggers/healthcheck_ping_trigger.py +45 -0
- digitalkin/modules/triggers/healthcheck_services_trigger.py +63 -0
- digitalkin/modules/triggers/healthcheck_status_trigger.py +52 -0
- digitalkin/py.typed +0 -0
- digitalkin/services/__init__.py +30 -0
- digitalkin/services/agent/__init__.py +6 -0
- digitalkin/services/agent/agent_strategy.py +19 -0
- digitalkin/services/agent/default_agent.py +13 -0
- digitalkin/services/base_strategy.py +22 -0
- digitalkin/services/communication/__init__.py +7 -0
- digitalkin/services/communication/communication_strategy.py +76 -0
- digitalkin/services/communication/default_communication.py +101 -0
- digitalkin/services/communication/grpc_communication.py +223 -0
- digitalkin/services/cost/__init__.py +14 -0
- digitalkin/services/cost/cost_strategy.py +100 -0
- digitalkin/services/cost/default_cost.py +114 -0
- digitalkin/services/cost/grpc_cost.py +138 -0
- digitalkin/services/filesystem/__init__.py +7 -0
- digitalkin/services/filesystem/default_filesystem.py +417 -0
- digitalkin/services/filesystem/filesystem_strategy.py +252 -0
- digitalkin/services/filesystem/grpc_filesystem.py +317 -0
- digitalkin/services/identity/__init__.py +6 -0
- digitalkin/services/identity/default_identity.py +15 -0
- digitalkin/services/identity/identity_strategy.py +14 -0
- digitalkin/services/registry/__init__.py +27 -0
- digitalkin/services/registry/default_registry.py +141 -0
- digitalkin/services/registry/exceptions.py +47 -0
- digitalkin/services/registry/grpc_registry.py +306 -0
- digitalkin/services/registry/registry_models.py +43 -0
- digitalkin/services/registry/registry_strategy.py +98 -0
- digitalkin/services/services_config.py +200 -0
- digitalkin/services/services_models.py +65 -0
- digitalkin/services/setup/__init__.py +1 -0
- digitalkin/services/setup/default_setup.py +219 -0
- digitalkin/services/setup/grpc_setup.py +343 -0
- digitalkin/services/setup/setup_strategy.py +145 -0
- digitalkin/services/snapshot/__init__.py +6 -0
- digitalkin/services/snapshot/default_snapshot.py +39 -0
- digitalkin/services/snapshot/snapshot_strategy.py +30 -0
- digitalkin/services/storage/__init__.py +7 -0
- digitalkin/services/storage/default_storage.py +228 -0
- digitalkin/services/storage/grpc_storage.py +214 -0
- digitalkin/services/storage/storage_strategy.py +273 -0
- digitalkin/services/user_profile/__init__.py +12 -0
- digitalkin/services/user_profile/default_user_profile.py +55 -0
- digitalkin/services/user_profile/grpc_user_profile.py +69 -0
- digitalkin/services/user_profile/user_profile_strategy.py +40 -0
- digitalkin/utils/__init__.py +29 -0
- digitalkin/utils/arg_parser.py +92 -0
- digitalkin/utils/development_mode_action.py +51 -0
- digitalkin/utils/dynamic_schema.py +483 -0
- digitalkin/utils/llm_ready_schema.py +75 -0
- digitalkin/utils/package_discover.py +357 -0
- digitalkin-0.3.2.dev2.dist-info/METADATA +602 -0
- digitalkin-0.3.2.dev2.dist-info/RECORD +131 -0
- digitalkin-0.3.2.dev2.dist-info/WHEEL +5 -0
- digitalkin-0.3.2.dev2.dist-info/licenses/LICENSE +430 -0
- digitalkin-0.3.2.dev2.dist-info/top_level.txt +4 -0
- modules/__init__.py +0 -0
- modules/cpu_intensive_module.py +280 -0
- modules/dynamic_setup_module.py +338 -0
- modules/minimal_llm_module.py +347 -0
- modules/text_transform_module.py +203 -0
- services/filesystem_module.py +200 -0
- services/storage_module.py +206 -0
|
@@ -0,0 +1,347 @@
|
|
|
1
|
+
"""Simple module calling an LLM."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import os
|
|
5
|
+
from collections.abc import Callable
|
|
6
|
+
from typing import Any, ClassVar, Literal
|
|
7
|
+
|
|
8
|
+
import openai
|
|
9
|
+
from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
from digitalkin.modules._base_module import BaseModule
|
|
13
|
+
from digitalkin.services.services_models import ServicesStrategy
|
|
14
|
+
|
|
15
|
+
# Configure logging with clear formatting.
# NOTE(review): basicConfig() at import time with DEBUG level is very chatty
# for anything but a standalone example — confirm this is intended (the
# sibling text_transform module uses INFO).
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class MessageInputPayload(BaseModel):
    """Trigger payload carrying a plain user prompt for the OpenAI module."""

    # Discriminator value used by OpenAIInput to select this payload variant.
    payload_type: Literal["message"] = "message"
    user_prompt: str = Field(..., title="User Prompt", description="The prompt provided by the user for processing.")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class InputFile(BaseModel):
    """A single input file (name, raw bytes, and type) fed to the module."""

    name: str = Field(..., title="File Name", description="The name of the file to be processed.")
    content: bytes = Field(..., title="File Content", description="The content of the file to be processed.")
    file_type: str = Field(..., title="File Type", description="The type of the file to be processed.")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class FileInputPayload(BaseModel):
    """Trigger payload carrying one or more files for the OpenAI module."""

    # Discriminator value used by OpenAIInput to select this payload variant.
    payload_type: Literal["file"] = "file"
    files: list[InputFile] = Field(..., title="Files", description="List of files to be processed.")
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class OpenAIInput(BaseModel):
    """Top-level input schema: a discriminated union of message/file payloads.

    Pydantic selects the concrete payload model by the `payload_type` field.
    """

    payload: MessageInputPayload | FileInputPayload = Field(
        ...,
        title="Payload",
        description="Either a message or list of file input.",
        discriminator="payload_type",
    )
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
class MessageOutputPayload(BaseModel):
    """Response payload carrying the assistant's textual answer."""

    # Discriminator value used by OpenAIOutput to select this payload variant.
    payload_type: Literal["message"] = "message"
    user_response: str = Field(
        ...,
        title="User Response",
        description="The response generated by the assistant based on the user prompt.",
    )
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class OutputFile(BaseModel):
    """A single file entry returned by the module, referenced by URL."""

    name: str = Field(..., title="File Name", description="The name of the file to be processed.")
    # NOTE(review): `url` is required (`...`) yet typed nullable — callers must
    # explicitly pass a value, possibly None. Confirm this is intentional.
    url: str | None = Field(..., title="File URL", description="The URL of the file to be processed.")
    message: str | None = Field(None, title="Message", description="Optional message associated with the file.")
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
class FileOutputPayload(BaseModel):
    """Response payload carrying the files produced by the assistant."""

    # Discriminator value used by OpenAIOutput to select this payload variant.
    payload_type: Literal["file"] = "file"
    files: list[OutputFile] = Field(..., title="Files", description="List of files generated by the assistant.")
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class OpenAIOutput(BaseModel):
    """Top-level output schema: a discriminated union of message/file payloads.

    Mirrors OpenAIInput; the concrete model is chosen via `payload_type`.
    """

    payload: MessageOutputPayload | FileOutputPayload = Field(
        ...,
        title="Payload",
        description="Either a message or file response.",
        discriminator="payload_type",
    )
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class OpenAISetup(BaseModel):
    """Per-module OpenAI configuration: model choice, prompt, and sampling limits."""

    model_name: str = Field(..., title="Model Name", description="The name of the OpenAI model to use for processing.")
    developer_prompt: str = Field(
        ...,
        title="Developer Prompt",
        description="The developer prompt new versions of system prompt, it defines the behavior of the assistant.",
    )
    # Sampling parameters; defaults keep responses short and moderately varied.
    temperature: float = Field(
        0.7,
        title="Temperature",
        description="Controls the randomness of the model's output. Higher values make output more random.",
    )
    max_tokens: int = Field(
        100,
        title="Max Tokens",
        description="The maximum number of tokens to generate in the response.",
    )
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class OpenAIConfigSetup(BaseModel):
    """Additional configuration consumed by `run_config_setup` (RAG inputs)."""

    rag_files: list[bytes] = Field(
        ...,
        title="RAG Files",
        description="Files used for retrieval-augmented generation (RAG) with the OpenAI module.",
    )
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
class OpenAIToolSecret(BaseModel):
    """Secret schema for the OpenAI tool module (currently declares no fields)."""
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
# Shared gRPC client configuration reused by every service strategy below.
client_config = ClientConfig(
    host="[::]",
    port=50151,
    # NOTE(review): insecure channel with no credentials — fine for local
    # development; confirm before any production deployment.
    security=SecurityMode.INSECURE,
    mode=ServerMode.ASYNC,
    credentials=None,
)
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
class OpenAIToolModule(
    BaseModule[
        OpenAIInput,
        OpenAIOutput,
        OpenAISetup,
        OpenAIToolSecret,
        OpenAIConfigSetup,
    ]
):
    """An OpenAI endpoint tool module.

    Validates incoming payloads, dispatches them by type ("message" or
    "file"), and streams results back through the provided callback.
    """

    name = "OpenAIToolModule"
    description = "A module that interacts with OpenAI API to process text"

    # Define the schema formats for the module
    config_setup_format = OpenAIConfigSetup
    input_format = OpenAIInput
    output_format = OpenAIOutput
    setup_format = OpenAISetup
    secret_format = OpenAIToolSecret

    # Async OpenAI client, created in initialize(). (Fixed: the previous
    # annotation declared `openai_client: openai.OpenAI`, but the attribute
    # actually assigned was `self.client` holding an AsyncOpenAI instance.)
    client: openai.AsyncOpenAI

    # Define module metadata for discovery
    metadata: ClassVar[dict[str, Any]] = {
        "name": "OpenAIToolModule",
        "description": "Transforms input text using a streaming LLM response.",
        "version": "1.0.0",
        "tags": ["text", "transformation", "encryption", "streaming"],
    }
    # Define services_config_params with default values
    services_config_strategies: ClassVar[dict[str, ServicesStrategy | None]] = {}
    services_config_params: ClassVar[dict[str, dict[str, Any | None] | None]] = {
        "storage": {
            "config": {"setups": OpenAISetup},
            "client_config": client_config,
        },
        "filesystem": {
            "config": {},
            "client_config": client_config,
        },
        "cost": {
            "config": {},
            "client_config": client_config,
        },
    }

    async def run_config_setup(
        self,
        config_setup_data: OpenAIConfigSetup,
        setup_data: OpenAISetup,
        callback: Callable,
    ) -> None:
        """Configure the module with additional setup data.

        Args:
            config_setup_data: Additional configuration content (RAG files).
            setup_data: Initial setup data for the module.
            callback: Function to send the updated setup data back to the client.
        """
        logger.info("Configuring OpenAIToolModule with additional setup data. %s", config_setup_data)

        # Fold the decoded RAG files into the developer prompt (joined with a
        # visible separator) so the assistant receives them as context.
        setup_data.developer_prompt = "| + |".join(f.decode("utf-8") for f in config_setup_data.rag_files)
        await callback(setup_data)

    async def initialize(self, setup_data: OpenAISetup) -> None:
        """Initialize the module capabilities.

        This method is called when the module is loaded by the server.
        Use it to set up module-specific resources or configurations.
        """
        # The API key is read from the environment; openai raises on first use
        # if it is missing, not here.
        self.client = openai.AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
        # Define what capabilities this module provides
        self.capabilities = ["text-processing", "streaming", "transformation"]
        logger.info(
            "Module %s initialized with capabilities: %s",
            self.metadata["name"],
            self.capabilities,
        )

    async def run_message(
        self,
        input_model: MessageInputPayload,
        setup_model: OpenAISetup,
        callback: Callable,
    ) -> None:
        """Process a message payload and send the assistant response back.

        Args:
            input_model: Contains the user prompt to process.
            setup_model: Contains model configuration and developer prompt.
            callback: Function to send output data back to the client.

        Raises:
            grpc.RpcError: If gRPC communication fails.
            openai.AuthenticationError: If authentication with OpenAI fails.
            openai.APIConnectionError: If an API connection error occurs.
            Exception: For any unexpected runtime errors.
        """
        # Real call kept for reference; the module currently returns mock data.
        # response = await self.client.responses.create(
        #     model=setup_model.model_name,
        #     instructions=setup_model.developer_prompt,
        #     temperature=setup_model.temperature,
        #     max_output_tokens=setup_model.max_tokens,
        #     input=input_model.user_prompt,
        # )
        # logger.info("Received answer from OpenAI: %s", response)

        # Get and save the output data
        message_output_payload = MessageOutputPayload(
            payload_type="message",
            user_response="Mock data",
            # user_response=response.output_text,
        )
        output_model = self.output_format.model_validate({"payload": message_output_payload})
        await callback(output_data=output_model)

    async def run_file(
        self,
        input_model: FileInputPayload,
        setup_model: OpenAISetup,
        callback: Callable,
    ) -> None:
        """Process a file payload and send back one output entry per file.

        `run()` dispatches "file" payloads here; this method was previously
        missing, so any file input crashed with AttributeError. Like
        `run_message`, it currently produces mock output (no OpenAI call).

        Args:
            input_model: Contains the files to process.
            setup_model: Contains model configuration and developer prompt.
            callback: Function to send output data back to the client.
        """
        output_files = [
            OutputFile(
                name=input_file.name,
                url=None,
                message=f"Received {len(input_file.content)} bytes of type '{input_file.file_type}'.",
            )
            for input_file in input_model.files
        ]
        file_output_payload = FileOutputPayload(payload_type="file", files=output_files)
        output_model = self.output_format.model_validate({"payload": file_output_payload})
        await callback(output_data=output_model)

    async def run(
        self,
        input_data: OpenAIInput,
        setup_data: OpenAISetup,
        callback: Callable,
    ) -> None:
        """Run the module.

        Validates input/setup, then dispatches to `run_message` or `run_file`
        based on the payload discriminator.

        Args:
            input_data: Input data for the module
            setup_data: Setup data for the module
            callback: Callback function to report progress

        Raises:
            ValueError: If the payload type is unknown
        """
        # Validate the input data
        input_model = self.input_format.model_validate(input_data)
        setup_model = self.setup_format.model_validate(setup_data)
        logger.debug("Running with input data: %s", input_model)

        # Defensive checks — pydantic validation normally guarantees both.
        if not hasattr(input_model, "payload"):
            error_msg = "Input data is missing 'payload' field"
            raise ValueError(error_msg)

        if not hasattr(input_model.payload, "payload_type"):
            error_msg = "Input payload is missing 'type' field"
            raise ValueError(error_msg)

        if input_model.payload.payload_type == "message":
            # Validate against MessageInputPayload
            message_payload = MessageInputPayload.model_validate(input_model.payload)
            await self.run_message(message_payload, setup_model, callback)
        elif input_model.payload.payload_type == "file":
            # Validate against FileInputPayload
            file_payload = FileInputPayload.model_validate(input_model.payload)
            await self.run_file(file_payload, setup_model, callback)
        else:
            error_msg = f"Unknown input type '{input_model.payload.payload_type}'. Expected 'message' or 'file'."
            raise ValueError(error_msg)
        logger.info("Job %s completed", self.job_id)

    async def cleanup(self) -> None:
        """Clean up any resources when the module is stopped.

        This method is called when the module is being shut down.
        Use it to close connections, free resources, etc.
        """
        logger.info("Cleaning up module %s", self.metadata["name"])
        # Release any resources here if needed.
|
|
@@ -0,0 +1,203 @@
|
|
|
1
|
+
"""Simple module example transforming a text."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from collections.abc import Callable
|
|
5
|
+
from typing import Any, ClassVar
|
|
6
|
+
|
|
7
|
+
from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
|
|
8
|
+
from pydantic import BaseModel
|
|
9
|
+
|
|
10
|
+
from digitalkin.modules._base_module import BaseModule
|
|
11
|
+
from digitalkin.services.setup.setup_strategy import SetupData
|
|
12
|
+
from digitalkin.services.storage.storage_strategy import DataType, StorageRecord
|
|
13
|
+
|
|
14
|
+
# Configure logging with clear formatting.
# NOTE(review): basicConfig() at import time is acceptable for a standalone
# example module, but would override application logging if imported as a
# library — confirm intended usage.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# Define schema models using Pydantic
|
|
23
|
+
class TextTransformInput(BaseModel):
    """Input schema for the text-transform module."""

    # Text to be transformed.
    text: str
    # Number of successive transformations to apply; defaults to a single pass.
    transform_count: int = 1
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class TextTransformOutput(BaseModel):
    """Output schema: one transformed string per streamed iteration."""

    # Result of this transformation step.
    transformed_text: str
    # 1-based index of the transformation this output corresponds to.
    iteration: int
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class TextTransformSetup(BaseModel):
    """Setup schema: cipher shift amount and case handling."""

    # Caesar shift applied to each letter; defaults to 1.
    shift_amount: int = 1
    # When True, the transformed text is upper-cased after shifting.
    uppercase: bool = False
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class TextTransformSecret(BaseModel):
    """Secret schema for the text-transform module (currently declares no fields)."""
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class TextTransformStorage(BaseModel):
    """Storage record tracking a job's consumption and completion state."""

    module: str = "Text_Transform_Module"
    user: str = "user"
    # Incremented once per transformation performed by run().
    consumption: int = 0
    # Set to True by cleanup() when the module shuts down.
    ended: bool = False
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
# Shared gRPC client configuration reused by the service strategies below.
client_config = ClientConfig(
    host="[::]",
    port=50151,
    # NOTE(review): insecure channel with no credentials — fine for local
    # development; confirm before any production deployment.
    security=SecurityMode.INSECURE,
    mode=ServerMode.ASYNC,
    credentials=None,
)
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class TextTransformModule(BaseModule[TextTransformInput, TextTransformOutput, TextTransformSetup, TextTransformSecret]):
    """A text transformation module that demonstrates streaming capabilities.

    This module takes text input and performs multiple transformations on it,
    sending back each transformation as a separate output message.
    """

    # Define the schema formats for the module
    name = "Text_Transform_Module"
    input_format = TextTransformInput
    output_format = TextTransformOutput
    setup_format = TextTransformSetup
    secret_format = TextTransformSecret

    # Define module metadata for discovery
    metadata: ClassVar[dict[str, Any]] = {
        "name": "Text_Transform_Module",
        "description": "Transforms input text using Caesar cipher with streaming output",
        "version": "1.0.0",
        "tags": ["text", "transformation", "encryption", "streaming"],
    }

    # Define services_config_params with default values
    services_config_strategies = {}
    services_config_params = {
        "storage": {
            "config": {"monitor": TextTransformStorage, "setups": TextTransformStorage},
            "client_config": client_config,
        },
        "filesystem": {
            "client_config": client_config,
        },
    }

    @staticmethod
    def _caesar_shift(text: str, shift: int) -> str:
        """Apply a Caesar cipher to *text*, wrapping within the ASCII alphabet.

        Fixes the previous implementation, which used ``chr(ord(c) + shift)``
        with no wrap-around: 'z' shifted by 1 became '{', leaving the alphabet
        entirely (and being skipped by later iterations) — not a Caesar cipher
        as the module metadata claims. Only ASCII letters are shifted; every
        other character is passed through unchanged.
        """
        shifted: list[str] = []
        for char in text:
            if "a" <= char <= "z":
                shifted.append(chr((ord(char) - ord("a") + shift) % 26 + ord("a")))
            elif "A" <= char <= "Z":
                shifted.append(chr((ord(char) - ord("A") + shift) % 26 + ord("A")))
            else:
                shifted.append(char)
        return "".join(shifted)

    async def initialize(self, setup_data: SetupData) -> None:
        """Initialize the module capabilities.

        This method is called when the module is loaded by the server.
        Use it to set up module-specific resources or configurations.

        Raises:
            Exception: If initialization fails.
        """
        # Define what capabilities this module provides
        self.capabilities = ["text-processing", "streaming", "transformation"]
        logger.info(
            "Module %s initialized with capabilities: %s",
            self.metadata["name"],
            self.capabilities,
        )

        # Seed a per-job monitor record; run() increments its consumption and
        # cleanup() marks it ended.
        self.db_id = self.storage.store(
            "monitor",
            {
                "module": self.metadata["name"],
                "user": f"xxxx+{self.job_id}",
                "consumption": 0,
                "ended": False,
            },
            data_type=DataType.VIEW,
        )

    async def run(
        self,
        input_data: dict[str, Any],
        setup_data: SetupData,
        callback: Callable,
    ) -> None:
        """Process input text and stream transformation results.

        Applies a Caesar cipher to the input text once per requested iteration,
        feeding each iteration's output back in as the next iteration's input,
        and streams every result through the callback.

        Args:
            input_data: Contains the text to transform and number of iterations.
            setup_data: Contains shift amount and uppercase flags.
            callback: Function to send output data back to the client.
        """
        text = input_data["text"]
        transform_count = int(input_data["transform_count"])
        logger.info("%s | %s", setup_data, type(setup_data))
        shift_amount = int(setup_data.current_setup_version.content["shift_amount"])
        uppercase = setup_data.current_setup_version.content["uppercase"]

        logger.info(
            "Running job %s with text: '%s', iterations: %s",
            self.job_id,
            text,
            transform_count,
        )

        # Process the text for each iteration
        for i in range(transform_count):
            # Apply Caesar cipher (shift each letter, wrapping in the alphabet)
            transformed = self._caesar_shift(text, shift_amount)

            # Apply uppercase transformation if configured
            if uppercase:
                transformed = transformed.upper()

            output_data = TextTransformOutput(transformed_text=transformed, iteration=i + 1)

            logger.info(
                "Sending transformation %s/%s: '%s'",
                i + 1,
                transform_count,
                transformed,
            )

            # Track consumption in the monitor record; abort streaming if the
            # record has disappeared.
            monitor_obj: StorageRecord | None = self.storage.read("monitor")
            if monitor_obj is None:
                logger.error("Monitor object not found in storage.")
                break
            monitor_obj.data.consumption += 1
            updated_monitor_obj: StorageRecord | None = self.storage.modify("monitor", monitor_obj.data.model_dump())
            self.db_id = updated_monitor_obj.name if updated_monitor_obj else "monitor"

            # Send results through callback and wait for acknowledgment
            await callback(job_id=self.job_id, output_data=output_data.model_dump())
            # Chain: next iteration transforms this iteration's output.
            text = transformed

        logger.info("Job %s completed with %s transformations", self.job_id, transform_count)

    async def cleanup(self) -> None:
        """Clean up any resources when the module is stopped.

        This method is called when the module is being shut down.
        Use it to close connections, free resources, etc.
        """
        # Lazy %-style args for consistency with the rest of the module
        # (was an f-string).
        logger.info("Cleaning up module %s", self.metadata["name"])
        monitor_obj = self.storage.read("monitor")
        if monitor_obj is None:
            logger.error("Monitor object not found in storage.")
            return
        # Mark the monitor record as finished.
        monitor_obj.data.ended = True
        updated_monitor_obj: StorageRecord | None = self.storage.modify("monitor", monitor_obj.data.model_dump())
        self.db_id = updated_monitor_obj.name if updated_monitor_obj else "monitor"