digitalkin 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- digitalkin/__version__.py +1 -1
- digitalkin/grpc_servers/_base_server.py +15 -17
- digitalkin/grpc_servers/module_server.py +9 -10
- digitalkin/grpc_servers/module_servicer.py +108 -85
- digitalkin/grpc_servers/registry_server.py +3 -6
- digitalkin/grpc_servers/registry_servicer.py +18 -19
- digitalkin/grpc_servers/utils/grpc_client_wrapper.py +3 -5
- digitalkin/logger.py +45 -1
- digitalkin/models/module/module.py +1 -0
- digitalkin/modules/_base_module.py +47 -6
- digitalkin/modules/job_manager/base_job_manager.py +139 -0
- digitalkin/modules/job_manager/job_manager_models.py +44 -0
- digitalkin/modules/job_manager/single_job_manager.py +218 -0
- digitalkin/modules/job_manager/taskiq_broker.py +173 -0
- digitalkin/modules/job_manager/taskiq_job_manager.py +213 -0
- digitalkin/services/base_strategy.py +3 -1
- digitalkin/services/cost/cost_strategy.py +64 -16
- digitalkin/services/cost/default_cost.py +95 -12
- digitalkin/services/cost/grpc_cost.py +149 -60
- digitalkin/services/filesystem/default_filesystem.py +5 -6
- digitalkin/services/filesystem/filesystem_strategy.py +3 -2
- digitalkin/services/filesystem/grpc_filesystem.py +31 -26
- digitalkin/services/services_config.py +6 -5
- digitalkin/services/setup/__init__.py +1 -0
- digitalkin/services/setup/default_setup.py +10 -12
- digitalkin/services/setup/grpc_setup.py +8 -10
- digitalkin/services/storage/default_storage.py +13 -6
- digitalkin/services/storage/grpc_storage.py +25 -9
- digitalkin/services/storage/storage_strategy.py +3 -2
- digitalkin/utils/arg_parser.py +5 -48
- digitalkin/utils/development_mode_action.py +51 -0
- {digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/METADATA +43 -12
- {digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/RECORD +40 -33
- {digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/WHEEL +1 -1
- modules/cpu_intensive_module.py +271 -0
- modules/minimal_llm_module.py +200 -56
- modules/storage_module.py +5 -6
- modules/text_transform_module.py +1 -1
- digitalkin/modules/job_manager.py +0 -176
- {digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/licenses/LICENSE +0 -0
- {digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/top_level.txt +0 -0
modules/minimal_llm_module.py
CHANGED

@@ -1,44 +1,157 @@
 """Simple module calling an LLM."""
 
 import logging
+import os
 from collections.abc import Callable
-from typing import Any, ClassVar
+from typing import Any, ClassVar, Literal
 
-import grpc
 import openai
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
-from digitalkin.grpc_servers.utils.models import
+from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
 from digitalkin.modules._base_module import BaseModule
+from digitalkin.services.services_models import ServicesStrategy
 from digitalkin.services.setup.setup_strategy import SetupData
 
 # Configure logging with clear formatting
 logging.basicConfig(
-    level=logging.
+    level=logging.DEBUG,
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
 )
 logger = logging.getLogger(__name__)
 
 
-
-
+class MessageInputPayload(BaseModel):
+    """Message trigger model for the OpenAI Archetype module."""
+
+    payload_type: Literal["message"] = "message"
+    user_prompt: str = Field(
+        ...,
+        title="User Prompt",
+        description="The prompt provided by the user for processing.",
+    )
+
+
+class InputFile(BaseModel):
+    """File model for the OpenAI Archetype module."""
+
+    name: str = Field(
+        ...,
+        title="File Name",
+        description="The name of the file to be processed.",
+    )
+    content: bytes = Field(
+        ...,
+        title="File Content",
+        description="The content of the file to be processed.",
+    )
+
+    file_type: str = Field(
+        ...,
+        title="File Type",
+        description="The type of the file to be processed.",
+    )
+
+
+class FileInputPayload(BaseModel):
+    """File input model for the OpenAI Archetype module."""
+
+    payload_type: Literal["file"] = "file"
+    files: list[InputFile] = Field(
+        ...,
+        title="Files",
+        description="List of files to be processed.",
+    )
+
+
+class OpenAIInput(BaseModel):
     """Input model defining what data the module expects."""
 
-
+    payload: MessageInputPayload | FileInputPayload = Field(
+        ...,
+        discriminator="payload_type",
+        title="Payload",
+        description="Either a message or list of file input.",
+    )
+
+
+class MessageOutputPayload(BaseModel):
+    """Message output model for the OpenAI Archetype module."""
+
+    payload_type: Literal["message"] = "message"
+    user_response: str = Field(
+        ...,
+        title="User Response",
+        description="The response generated by the assistant based on the user prompt.",
+    )
+
+
+class OutputFile(BaseModel):
+    """File model for the OpenAI Archetype module."""
+
+    name: str = Field(
+        ...,
+        title="File Name",
+        description="The name of the file to be processed.",
+    )
+    url: str | None = Field(
+        ...,
+        title="File URL",
+        description="The URL of the file to be processed.",
+    )
+
+    message: str | None = Field(
+        None,
+        title="Message",
+        description="Optional message associated with the file.",
+    )
+
+
+class FileOutputPayload(BaseModel):
+    """File output model for the OpenAI Archetype module."""
+
+    payload_type: Literal["file"] = "file"
+    files: list[OutputFile] = Field(
+        ...,
+        title="Files",
+        description="List of files generated by the assistant.",
+    )
 
 
-class
+class OpenAIOutput(BaseModel):
     """Output model defining what data the module produces."""
 
-
+    payload: MessageOutputPayload | FileOutputPayload = Field(
+        ...,
+        discriminator="payload_type",
+        title="Payload",
+        description="Either a message or file response.",
+    )
 
 
-class
+class OpenAISetup(BaseModel):
     """Setup model defining module configuration parameters."""
 
-
-
-
+    model_name: str = Field(
+        ...,
+        title="Model Name",
+        description="The name of the OpenAI model to use for processing.",
+    )
+    developer_prompt: str = Field(
+        ...,
+        title="Developer Prompt",
+        description="The developer prompt new versions of system prompt, it defines the behavior of the assistant.",
+    )
+    temperature: float = Field(
+        0.7,
+        title="Temperature",
+        description="Controls the randomness of the model's output. Higher values make output more random.",
+    )
+    max_tokens: int = Field(
+        100,
+        title="Max Tokens",
+        description="The maximum number of tokens to generate in the response.",
+    )
 
 
 class OpenAIToolSecret(BaseModel):
@@ -54,38 +167,42 @@ client_config = ClientConfig(
 )
 
 
-class OpenAIToolModule(BaseModule[
+class OpenAIToolModule(BaseModule[OpenAIInput, OpenAIOutput, OpenAISetup, OpenAIToolSecret]):
     """A openAI endpoint tool module module."""
 
     name = "OpenAIToolModule"
     description = "A module that interacts with OpenAI API to process text"
 
     # Define the schema formats for the module
-    input_format =
-    output_format =
-    setup_format =
+    input_format = OpenAIInput
+    output_format = OpenAIOutput
+    setup_format = OpenAISetup
     secret_format = OpenAIToolSecret
 
     openai_client: openai.OpenAI
 
     # Define module metadata for discovery
     metadata: ClassVar[dict[str, Any]] = {
-        "name": "
+        "name": "OpenAIToolModule",
         "description": "Transforms input text using a streaming LLM response.",
         "version": "1.0.0",
         "tags": ["text", "transformation", "encryption", "streaming"],
     }
     # Define services_config_params with default values
-    services_config_strategies = {}
-    services_config_params = {
+    services_config_strategies: ClassVar[dict[str, ServicesStrategy | None]] = {}
+    services_config_params: ClassVar[dict[str, dict[str, Any | None] | None]] = {
         "storage": {
-            "config": {"setups":
+            "config": {"setups": OpenAISetup},
             "client_config": client_config,
         },
         "filesystem": {
             "config": {},
             "client_config": client_config,
         },
+        "cost": {
+            "config": {},
+            "client_config": client_config,
+        },
     }
 
     async def initialize(self, setup_data: SetupData) -> None:
@@ -94,7 +211,7 @@ class OpenAIToolModule(BaseModule[OpenAIToolInput, OpenAIToolOutput, OpenAIToolS
         This method is called when the module is loaded by the server.
         Use it to set up module-specific resources or configurations.
         """
-        self.
+        self.client: openai.AsyncOpenAI = openai.AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
         # Define what capabilities this module provides
         self.capabilities = ["text-processing", "streaming", "transformation"]
         logger.info(
@@ -103,10 +220,10 @@
             self.capabilities,
         )
 
-    async def
+    async def run_message(
         self,
-
-
+        input_model: MessageInputPayload,
+        setup_model: OpenAISetup,
         callback: Callable,
     ) -> None:
         """Process input text and stream LLM responses.
@@ -122,37 +239,64 @@
             openai.APIConnectionError: If an API connection error occurs.
             Exception: For any unexpected runtime errors.
         """
-
-
-
-
-
+        # response = await self.client.responses.create(
+        #     model=setup_model.model_name,
+        #     instructions=setup_model.developer_prompt,
+        #     temperature=setup_model.temperature,
+        #     max_output_tokens=setup_model.max_tokens,
+        #     input=input_model.user_prompt,
+        # )
+        # logger.info("Recieved answer from OpenAI: %s", response)
+
+        # Get and save the output data
+        message_output_payload = MessageOutputPayload(
+            payload_type="message",
+            user_response="Mock data",
+            # user_response=response.output_text,
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        output_model = self.output_format.model_validate({"payload": message_output_payload})
+        await callback(output_data=output_model)
+
+    async def run(
+        self,
+        input_data: OpenAIInput,
+        setup_data: OpenAISetup,
+        callback: Callable,
+    ) -> None:
+        """Run the module.
+
+        Args:
+            input_data: Input data for the module
+            setup_data: Setup data for the module
+            callback: Callback function to report progress
+
+        Raises:
+            ValueError: If the payload type is unknown
+        """
+        # Validate the input data
+        input_model = self.input_format.model_validate(input_data)
+        setup_model = self.setup_format.model_validate(setup_data)
+        logger.debug("Running with input data: %s", input_model)
+
+        if not hasattr(input_model, "payload"):
+            error_msg = "Input data is missing 'payload' field"
+            raise ValueError(error_msg)
+
+        if not hasattr(input_model.payload, "payload_type"):
+            error_msg = "Input payload is missing 'type' field"
+            raise ValueError(error_msg)
+
+        if input_model.payload.payload_type == "message":
+            # Validate against MessageInputPayload
+            message_payload = MessageInputPayload.model_validate(input_model.payload)
+            await self.run_message(message_payload, setup_model, callback)
+        elif input_model.payload.payload_type == "file":
+            # Validate against FileInputPayload
+            file_payload = FileInputPayload.model_validate(input_model.payload)
+            await self.run_file(file_payload, setup_model, callback)
+        else:
+            error_msg = f"Unknown input type '{input_model.payload.payload_type}'. Expected 'message' or 'file'."
+            raise ValueError(error_msg)
         logger.info("Job %s completed", self.job_id)
 
     async def cleanup(self) -> None:
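For orientation: the rewrite above replaces flat input/output models with Pydantic discriminated unions keyed on `payload_type`, and `run` dispatches on that key. A minimal standalone sketch of the pattern, with fields trimmed and hypothetical sample data (not taken from the package):

from typing import Literal

from pydantic import BaseModel, Field


class MessageInputPayload(BaseModel):
    payload_type: Literal["message"] = "message"
    user_prompt: str


class InputFile(BaseModel):
    name: str
    content: bytes
    file_type: str


class FileInputPayload(BaseModel):
    payload_type: Literal["file"] = "file"
    files: list[InputFile]


class OpenAIInput(BaseModel):
    # The discriminator tells Pydantic to pick the sub-model whose
    # payload_type literal matches the incoming data, so the module can
    # route to run_message / run_file without trial-and-error validation.
    payload: MessageInputPayload | FileInputPayload = Field(discriminator="payload_type")


msg = OpenAIInput.model_validate({"payload": {"payload_type": "message", "user_prompt": "Hello"}})
assert isinstance(msg.payload, MessageInputPayload)

doc = OpenAIInput.model_validate(
    {"payload": {"payload_type": "file", "files": [{"name": "a.txt", "content": b"hi", "file_type": "text/plain"}]}}
)
assert isinstance(doc.payload, FileInputPayload)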
modules/storage_module.py
CHANGED

@@ -3,7 +3,7 @@
 import asyncio
 import datetime
 from collections.abc import Callable
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 from pydantic import BaseModel, Field
 
@@ -12,7 +12,9 @@ from digitalkin.models.module import ModuleStatus
 from digitalkin.modules.archetype_module import ArchetypeModule
 from digitalkin.services.services_config import ServicesConfig
 from digitalkin.services.services_models import ServicesMode
-
+
+if TYPE_CHECKING:
+    from digitalkin.services.storage.storage_strategy import StorageRecord
 
 
 class ExampleInput(BaseModel):
@@ -120,10 +122,7 @@ class ExampleModule(ArchetypeModule[ExampleInput, ExampleOutput, ExampleSetup, E
 
         # Store the output data in storage
         storage_id = self.storage.store(
-            collection="example",
-            record_id=f"example_outputs",
-            data=output_data.model_dump(),
-            data_type="OUTPUT"
+            collection="example", record_id="example_outputs", data=output_data.model_dump(), data_type="OUTPUT"
         )
 
         logger.info("Stored output data with ID: %s", storage_id)
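The `TYPE_CHECKING` guard added above imports `StorageRecord` for annotations only, keeping it out of the runtime import graph. A small sketch of the pattern (the `describe` helper is illustrative, not part of the package):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers (mypy, pyright), never at runtime,
    # so it cannot create import cycles or slow module startup.
    from digitalkin.services.storage.storage_strategy import StorageRecord


def describe(record: "StorageRecord") -> str:
    # The quoted annotation defers evaluation, so the class is never needed at runtime.
    return f"storage record: {record!r}"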
modules/text_transform_module.py
CHANGED

@@ -6,7 +6,7 @@ from typing import Any, ClassVar
 
 from pydantic import BaseModel
 
-from digitalkin.grpc_servers.utils.models import
+from digitalkin.grpc_servers.utils.models import ClientConfig, SecurityMode, ServerMode
 from digitalkin.modules._base_module import BaseModule
 from digitalkin.services.setup.setup_strategy import SetupData
 from digitalkin.services.storage.storage_strategy import DataType, StorageRecord
digitalkin/modules/job_manager.py
DELETED

@@ -1,176 +0,0 @@
-"""Background module manager."""
-
-import asyncio
-import uuid
-from argparse import ArgumentParser, Namespace
-from collections.abc import Callable, Coroutine
-from typing import Any
-
-from digitalkin.logger import logger
-from digitalkin.models import ModuleStatus
-from digitalkin.models.module import InputModelT, OutputModelT, SecretModelT, SetupModelT
-from digitalkin.modules._base_module import BaseModule
-from digitalkin.services.services_config import ServicesConfig
-from digitalkin.services.services_models import ServicesMode
-from digitalkin.utils.arg_parser import ArgParser, DevelopmentModeMappingAction
-
-
-class JobManager(ArgParser):
-    """Background module manager."""
-
-    args: Namespace
-
-    @staticmethod
-    async def _job_specific_callback(
-        callback: Callable[[str, OutputModelT], Coroutine[Any, Any, None]], job_id: str
-    ) -> Callable[[OutputModelT], Coroutine[Any, Any, None]]:
-        """Return a callback function for the job.
-
-        Args:
-            callback: Callback function to be called when the job is done
-            job_id: Module identifier
-
-        Returns:
-            Callable: Callback function
-        """
-
-        def callback_wrapper(output_data: OutputModelT) -> Coroutine[Any, Any, None]:
-            """Wrapper for the callback function.
-
-            Args:
-                output_data: Output data of the job
-
-            Returns:
-                Coroutine: Callback function
-            """
-            return callback(job_id, output_data)
-
-        return callback_wrapper
-
-    def _add_parser_args(self, parser: ArgumentParser) -> None:
-        super()._add_parser_args(parser)
-        parser.add_argument(
-            "-d",
-            "--dev-mode",
-            env_var="SERVICE_MODE",
-            choices=ServicesMode.__members__,
-            default="local",
-            action=DevelopmentModeMappingAction,
-            dest="services_mode",
-            help="Define Module Service configurations for endpoints",
-        )
-
-    def __init__(self, module_class: type[BaseModule]) -> None:
-        """Initialize the job manager."""
-        self.module_class = module_class
-        self.modules: dict[str, BaseModule] = {}
-        self._lock = asyncio.Lock()
-        super().__init__()
-
-        services_config = ServicesConfig(
-            services_config_strategies=self.module_class.services_config_strategies,
-            services_config_params=self.module_class.services_config_params,
-            mode=self.args.services_mode,
-        )
-        setattr(self.module_class, "services_config", services_config)
-
-    async def create_job(  # noqa: D417
-        self,
-        input_data: InputModelT,
-        setup_data: SetupModelT,
-        mission_id: str,
-        callback: Callable[[str, OutputModelT], Coroutine[Any, Any, None]],
-    ) -> tuple[str, BaseModule[InputModelT, OutputModelT, SetupModelT, SecretModelT]]:  # type: ignore
-        """Start new module job in background (asyncio).
-
-        Args:
-            module_class: Class of the module to instantiate
-            *args: Arguments passed to the module constructor
-            **kwargs: Arguments passed to the module constructor
-
-        Returns:
-            str: job_id of the module entity
-        """
-        job_id = str(uuid.uuid4())
-        """TODO: check uniqueness of the job_id"""
-        # Create and start the module
-        module = self.module_class(job_id, mission_id=mission_id)
-        self.modules[job_id] = module
-        try:
-            await module.start(input_data, setup_data, await JobManager._job_specific_callback(callback, job_id))
-            logger.info("Module %s (%s) started successfully", job_id, module.name)
-        except Exception:
-            # On error, remove the module from the manager
-            del self.modules[job_id]
-            logger.exception("Failed to start module %s: %s", job_id)
-            raise
-        else:
-            return job_id, module
-
-    async def stop_module(self, job_id: str) -> bool:
-        """Stop a running module.
-
-        Args:
-            job_id: Identifier of the module to stop
-
-        Returns:
-            True if the module was stopped, False if it does not exist.
-        """
-        async with self._lock:
-            module = self.modules.get(job_id)
-            if not module:
-                logger.warning(f"Module {job_id} not found")
-                return False
-            try:
-                await module.stop()
-                logger.info(f"Module {job_id} ({module.name}) stopped successfully")
-            except Exception as e:
-                logger.error(f"Error while stopping module {job_id}: {e}")
-                raise
-            else:
-                return True
-
-    def get_module_status(self, job_id: str) -> ModuleStatus | None:
-        """Get the status of a module.
-
-        Args:
-            job_id: Module identifier
-
-        Returns:
-            The module status, or None if the module does not exist.
-        """
-        module = self.modules.get(job_id)
-        return module.status if module else None
-
-    def get_module(self, job_id: str) -> BaseModule | None:
-        """Get a reference to the module.
-
-        Args:
-            job_id: Module identifier
-
-        Returns:
-            The module, or None if it does not exist.
-        """
-        return self.modules.get(job_id)
-
-    async def stop_all_modules(self) -> None:
-        """Stop all running modules."""
-        async with self._lock:
-            stop_tasks = [self.stop_module(job_id) for job_id in list(self.modules.keys())]
-            if stop_tasks:
-                await asyncio.gather(*stop_tasks, return_exceptions=True)
-
-    def list_modules(self) -> dict[str, dict[str, Any]]:
-        """List all modules with their status.
-
-        Returns:
-            Dictionary of modules with their information.
-        """
-        return {
-            job_id: {
-                "name": module.name,
-                "status": module.status,
-                "class": module.__class__.__name__,
-            }
-            for job_id, module in self.modules.items()
-        }
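The deleted `JobManager._job_specific_callback` bound a `job_id` into each per-job callback via a closure, a responsibility that presumably now lives in the new `digitalkin/modules/job_manager/` package. A standalone sketch of that closure pattern, with the generic model types simplified to `str` and the factory made a plain function:

import asyncio
from collections.abc import Callable, Coroutine
from typing import Any


def job_specific_callback(
    callback: Callable[[str, str], Coroutine[Any, Any, None]], job_id: str
) -> Callable[[str], Coroutine[Any, Any, None]]:
    # Capture job_id in the closure so callers only supply the output data.
    def wrapper(output_data: str) -> Coroutine[Any, Any, None]:
        return callback(job_id, output_data)

    return wrapper


async def report(job_id: str, output_data: str) -> None:
    print(f"[{job_id}] {output_data}")


async def main() -> None:
    cb = job_specific_callback(report, "job-42")
    await cb("done")  # prints: [job-42] done


asyncio.run(main())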
{digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/licenses/LICENSE
File without changes

{digitalkin-0.2.11.dist-info → digitalkin-0.2.13.dist-info}/top_level.txt
File without changes