fabricatio-0.2.6.dev2-cp312-cp312-win_amd64.whl → fabricatio-0.2.7.dev2-cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/__init__.py +7 -24
- fabricatio/_rust.cp312-win_amd64.pyd +0 -0
- fabricatio/_rust.pyi +22 -0
- fabricatio/actions/article.py +147 -19
- fabricatio/actions/output.py +21 -6
- fabricatio/actions/rag.py +51 -3
- fabricatio/capabilities/correct.py +34 -4
- fabricatio/capabilities/rag.py +67 -16
- fabricatio/capabilities/rating.py +15 -6
- fabricatio/capabilities/review.py +7 -4
- fabricatio/capabilities/task.py +5 -5
- fabricatio/config.py +29 -21
- fabricatio/decorators.py +32 -0
- fabricatio/models/action.py +117 -43
- fabricatio/models/extra.py +724 -84
- fabricatio/models/generic.py +60 -9
- fabricatio/models/kwargs_types.py +40 -10
- fabricatio/models/role.py +30 -6
- fabricatio/models/tool.py +6 -2
- fabricatio/models/usages.py +94 -47
- fabricatio/models/utils.py +25 -0
- fabricatio/parser.py +2 -0
- fabricatio/workflows/articles.py +12 -1
- fabricatio-0.2.7.dev2.data/scripts/tdown.exe +0 -0
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev2.dist-info}/METADATA +6 -2
- fabricatio-0.2.7.dev2.dist-info/RECORD +42 -0
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev2.dist-info}/WHEEL +1 -1
- fabricatio-0.2.6.dev2.data/scripts/tdown.exe +0 -0
- fabricatio-0.2.6.dev2.dist-info/RECORD +0 -42
- {fabricatio-0.2.6.dev2.dist-info → fabricatio-0.2.7.dev2.dist-info}/licenses/LICENSE +0 -0
@@ -10,6 +10,7 @@ from fabricatio.journal import logger
 from fabricatio.models.generic import WithBriefing
 from fabricatio.models.kwargs_types import ValidateKwargs
 from fabricatio.models.usages import LLMUsage
+from fabricatio.models.utils import override_kwargs
 from fabricatio.parser import JsonCapture
 from more_itertools import flatten, windowed
 from pydantic import NonNegativeInt, PositiveInt
@@ -126,13 +127,13 @@ class GiveRating(WithBriefing, LLMUsage):
         return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)

     async def draft_rating_manual(
-        self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
+        self, topic: str, criteria: Optional[Set[str]] = None, **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
     ) -> Optional[Dict[str, str]]:
         """Drafts a rating manual based on a topic and dimensions.

         Args:
             topic (str): The topic for the rating manual.
-            criteria (Set[str]): A set of
+            criteria (Optional[Set[str]], optional): A set of criteria for the rating manual. If not specified, then this method will draft the criteria automatically.
             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
@@ -148,6 +149,14 @@ class GiveRating(WithBriefing, LLMUsage):
                 return json_data
             return None

+        criteria = criteria or await self.draft_rating_criteria(
+            topic, **self.prepend_sys_msg(override_kwargs(dict(kwargs), default=None))
+        )
+
+        if criteria is None:
+            logger.error(f"Failed to draft rating criteria for topic {topic}")
+            return None
+
         return await self.aask_validate(
             question=(
                 TEMPLATE_MANAGER.render_template(
@@ -159,7 +168,7 @@ class GiveRating(WithBriefing, LLMUsage):
                 )
             ),
             validator=_validator,
-            **self.
+            **self.prepend_sys_msg(kwargs),
         )

     async def draft_rating_criteria(
@@ -191,7 +200,7 @@ class GiveRating(WithBriefing, LLMUsage):
             validator=lambda resp: set(out)
             if (out := JsonCapture.validate_with(resp, list, str, criteria_count)) is not None
             else out,
-            **self.
+            **self.prepend_sys_msg(kwargs),
         )

     async def draft_rating_criteria_from_examples(
@@ -244,7 +253,7 @@ class GiveRating(WithBriefing, LLMUsage):
                     validator=lambda resp: JsonCapture.validate_with(
                         resp, target_type=list, elements_type=str, length=reasons_count
                     ),
-                    **self.
+                    **self.prepend_sys_msg(kwargs),
                 )
             )
             # extract certain mount of criteria from reasons according to their importance and frequency
@@ -301,7 +310,7 @@ class GiveRating(WithBriefing, LLMUsage):
                 for pair in windows
             ],
             validator=lambda resp: JsonCapture.validate_with(resp, target_type=float),
-            **self.
+            **self.prepend_sys_msg(kwargs),
         )
         weights = [1]
         for rw in relative_weights:
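The hunks above make `criteria` optional on `draft_rating_manual` and let the capability draft criteria for itself, returning `None` when that fails. A minimal usage sketch, assuming a configured object that mixes in `GiveRating`; the `rater` name and topic string are illustrative, not part of the package:

```python
async def build_manual(rater) -> None:
    # `criteria` can now be omitted; the capability drafts criteria automatically
    manual = await rater.draft_rating_manual(topic="paper clarity")
    if manual is None:
        # new failure path: criteria drafting did not succeed
        print("could not draft rating criteria")
    else:
        print(manual)
```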
@@ -1,6 +1,6 @@
 """A module that provides functionality to rate tasks based on a rating manual and score range."""

-from typing import List, Optional, Self, Set, Unpack, cast
+from typing import Dict, List, Optional, Self, Set, Unpack, cast

 from fabricatio._rust_instances import TEMPLATE_MANAGER
 from fabricatio.capabilities.propose import Propose
@@ -121,7 +121,7 @@ class ReviewResult[T](ProposedAble, Display):
             ReviewResult[K]: The current instance with updated reference type.
         """
         self._ref = ref  # pyright: ignore [reportAttributeAccessIssue]
-        return cast(ReviewResult[K], self)
+        return cast("ReviewResult[K]", self)

     def deref(self) -> T:
         """Retrieve the referenced object that was reviewed.
@@ -200,13 +200,14 @@ class Review(GiveRating, Propose):
             ReviewResult[Task[T]]: A review result containing identified problems and proposed solutions,
                 with a reference to the original task.
         """
-        return cast(ReviewResult[Task[T]], await self.review_obj(task, **kwargs))
+        return cast("ReviewResult[Task[T]]", await self.review_obj(task, **kwargs))

     async def review_string(
         self,
         input_text: str,
         topic: str,
         criteria: Optional[Set[str]] = None,
+        rating_manual: Optional[Dict[str, str]] = None,
         **kwargs: Unpack[ValidateKwargs[ReviewResult[str]]],
     ) -> ReviewResult[str]:
         """Review a string based on specified topic and criteria.
@@ -219,6 +220,7 @@ class Review(GiveRating, Propose):
             topic (str): The subject topic for the review criteria.
             criteria (Optional[Set[str]], optional): A set of criteria for the review.
                 If not provided, criteria will be drafted automatically. Defaults to None.
+            rating_manual (Optional[Dict[str,str]], optional): A dictionary of rating criteria and their corresponding scores.
             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
@@ -227,12 +229,13 @@ class Review(GiveRating, Propose):
         """
         default = None
         if "default" in kwargs:
+            # this `default` is the default for the `propose` method
             default = kwargs.pop("default")

         criteria = criteria or (await self.draft_rating_criteria(topic, **kwargs))
         if not criteria:
             raise ValueError("No criteria provided for review.")
-        manual = await self.draft_rating_manual(topic, criteria, **kwargs)
+        manual = rating_manual or await self.draft_rating_manual(topic, criteria, **kwargs)

         if default is not None:
             kwargs["default"] = default
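`review_string` now accepts a precomputed `rating_manual`, which skips the manual-drafting round trip. A hedged sketch of the call site, assuming a configured object with the `Review` capability; the `reviewer` name, topic, and manual contents are illustrative:

```python
async def review_abstract(reviewer, text: str) -> None:
    manual = {
        "clarity": "Reward concise, unambiguous wording.",
        "accuracy": "Penalize unsupported claims.",
    }
    # passing rating_manual reuses the prepared manual instead of drafting one per call
    result = await reviewer.review_string(
        text, topic="abstract quality", criteria=set(manual), rating_manual=manual
    )
    print(result)
```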
fabricatio/capabilities/task.py
CHANGED
@@ -23,7 +23,7 @@ class ProposeTask(WithBriefing, Propose):
         self,
         prompt: str,
         **kwargs: Unpack[ValidateKwargs[Task[T]]],
-    ) -> Task[T]:
+    ) -> Optional[Task[T]]:
         """Asynchronously proposes a task based on a given prompt and parameters.

         Parameters:
@@ -37,7 +37,7 @@ class ProposeTask(WithBriefing, Propose):
             logger.error(err := f"{self.name}: Prompt must be provided.")
             raise ValueError(err)

-        return await self.propose(Task, prompt, **self.
+        return await self.propose(Task, prompt, **self.prepend_sys_msg(cast("Dict[str, Any]", kwargs)))


 class HandleTask(WithBriefing, ToolBoxUsage):
@@ -81,10 +81,10 @@ class HandleTask(WithBriefing, ToolBoxUsage):
         return await self.aask_validate(
             question=q,
             validator=_validator,
-            **self.
+            **self.prepend_sys_msg(cast("Dict[str, Any]", kwargs)),
         )

-    async def
+    async def handle_fine_grind(
         self,
         task: Task,
         data: Dict[str, Any],
@@ -110,4 +110,4 @@ class HandleTask(WithBriefing, ToolBoxUsage):

     async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
         """Asynchronously handles a task based on a given task object and parameters."""
-        return await self.
+        return await self.handle_fine_grind(task, data, **kwargs)
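The task-proposal coroutine now declares `Optional[Task[T]]`, so callers should expect `None` on failure. A hedged sketch of defensive handling, assuming a role object with the `ProposeTask` capability and assuming the coroutine is named `propose_task` (its name sits outside the hunk shown):

```python
async def plan(role, prompt: str) -> None:
    task = await role.propose_task(prompt)
    if task is None:
        # proposal can now legitimately come back empty; handle it instead of crashing
        print("task proposal failed")
        return
    await task.start()
```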
fabricatio/config.py
CHANGED
@@ -48,37 +48,37 @@ class LLMConfig(BaseModel):
     """

     model_config = ConfigDict(use_attribute_docstrings=True)
-    api_endpoint: HttpUrl = Field(default=HttpUrl("https://api.openai.com"))
+    api_endpoint: Optional[HttpUrl] = Field(default=HttpUrl("https://api.openai.com"))
     """OpenAI API Endpoint."""

-    api_key: SecretStr = Field(default=SecretStr(""))
+    api_key: Optional[SecretStr] = Field(default=SecretStr("sk-setyourkey"))
     """OpenAI API key. Empty by default for security reasons, should be set before use."""

-    timeout: PositiveInt = Field(default=300)
+    timeout: Optional[PositiveInt] = Field(default=300)
     """The timeout of the LLM model in seconds. Default is 300 seconds as per request."""

-    max_retries: PositiveInt = Field(default=3)
+    max_retries: Optional[PositiveInt] = Field(default=3)
     """The maximum number of retries. Default is 3 retries."""

-    model: str = Field(default="gpt-3.5-turbo")
+    model: Optional[str] = Field(default="gpt-3.5-turbo")
     """The LLM model name. Set to 'gpt-3.5-turbo' as per request."""

-    temperature: NonNegativeFloat = Field(default=1.0)
+    temperature: Optional[NonNegativeFloat] = Field(default=1.0)
     """The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request."""

-    stop_sign: str | List[str] = Field(
+    stop_sign: Optional[str | List[str]] = Field(default=None)
     """The stop sign of the LLM model. No default stop sign specified."""

-    top_p: NonNegativeFloat = Field(default=0.35)
+    top_p: Optional[NonNegativeFloat] = Field(default=0.35)
     """The top p of the LLM model. Controls diversity via nucleus sampling. Set to 0.35 as per request."""

-    generation_count: PositiveInt = Field(default=1)
+    generation_count: Optional[PositiveInt] = Field(default=1)
     """The number of generations to generate. Default is 1."""

-    stream: bool = Field(default=False)
+    stream: Optional[bool] = Field(default=False)
     """Whether to stream the LLM model's response. Default is False."""

-    max_tokens: PositiveInt = Field(default=
+    max_tokens: Optional[PositiveInt] = Field(default=None)
     """The maximum number of tokens to generate. Set to 8192 as per request."""

     rpm: Optional[PositiveInt] = Field(default=100)
@@ -93,7 +93,7 @@ class EmbeddingConfig(BaseModel):

     model_config = ConfigDict(use_attribute_docstrings=True)

-    model: str = Field(default="text-embedding-ada-002")
+    model: Optional[str] = Field(default="text-embedding-ada-002")
     """The embedding model name. """

     dimensions: Optional[PositiveInt] = Field(default=None)
@@ -102,10 +102,10 @@ class EmbeddingConfig(BaseModel):
     timeout: Optional[PositiveInt] = Field(default=None)
     """The timeout of the embedding model in seconds."""

-    max_sequence_length: PositiveInt = Field(default=8192)
+    max_sequence_length: Optional[PositiveInt] = Field(default=8192)
     """The maximum sequence length of the embedding model. Default is 8192 as per request."""

-    caching: bool = Field(default=False)
+    caching: Optional[bool] = Field(default=False)
     """Whether to cache the embedding. Default is False."""

     api_endpoint: Optional[HttpUrl] = None
@@ -148,13 +148,13 @@ class DebugConfig(BaseModel):
     log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
     """The log level of the application."""

-    log_file: FilePath = Field(default=Path(rf"{ROAMING_DIR}\fabricatio.log"))
+    log_file: FilePath = Field(default=Path(rf"{ROAMING_DIR}\fabricatio.log"), frozen=True)
     """The log file of the application."""

-    rotation: int = Field(default=1)
+    rotation: int = Field(default=1, frozen=True)
     """The rotation of the log file. in weeks."""

-    retention: int = Field(default=2)
+    retention: int = Field(default=2, frozen=True)
     """The retention of the log file. in weeks."""

     streaming_visible: bool = Field(default=False)
@@ -232,6 +232,12 @@ class TemplateConfig(BaseModel):
     correct_template: str = Field(default="correct")
     """The name of the correct template which will be used to correct a string."""

+    co_validation_template: str = Field(default="co_validation")
+    """The name of the co-validation template which will be used to co-validate a string."""
+
+    as_prompt_template: str = Field(default="as_prompt")
+    """The name of the as prompt template which will be used to convert a string to a prompt."""
+

 class MagikaConfig(BaseModel):
     """Magika configuration class."""
@@ -272,9 +278,9 @@ class RagConfig(BaseModel):

     model_config = ConfigDict(use_attribute_docstrings=True)

-    milvus_uri: HttpUrl = Field(default=HttpUrl("http://localhost:19530"))
+    milvus_uri: Optional[HttpUrl] = Field(default=HttpUrl("http://localhost:19530"))
     """The URI of the Milvus server."""
-    milvus_timeout: Optional[PositiveFloat] = Field(default=
+    milvus_timeout: Optional[PositiveFloat] = Field(default=30.0)
     """The timeout of the Milvus server."""
     milvus_token: Optional[SecretStr] = Field(default=None)
     """The token of the Milvus server."""
@@ -300,11 +306,13 @@ class RoutingConfig(BaseModel):

     model_config = ConfigDict(use_attribute_docstrings=True)

-
+    max_parallel_requests: Optional[int] = 60
+    """The maximum number of parallel requests. None means not checked."""
+    allowed_fails: Optional[int] = 3
     """The number of allowed fails before the routing is considered failed."""
     retry_after: int = 15
     """The time in seconds to wait before retrying the routing after a fail."""
-    cooldown_time: Optional[int] =
+    cooldown_time: Optional[int] = 30
     """The time in seconds to wait before retrying the routing after a cooldown."""

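Most `LLMConfig` fields are now `Optional`, with `stop_sign` and `max_tokens` defaulting to `None`. A hedged sketch of what that looks like at construction time, assuming `LLMConfig` is imported from `fabricatio.config` as defined in the hunks above:

```python
from fabricatio.config import LLMConfig

cfg = LLMConfig()                  # all defaults, as declared in the diff
assert cfg.max_tokens is None      # no token cap unless you set one
assert cfg.stop_sign is None       # no default stop sign any more
custom = LLMConfig(model=None)     # None now passes validation for these fields
```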
fabricatio/decorators.py
CHANGED
@@ -177,3 +177,35 @@ def use_temp_module[**P, R](modules: ModuleType | List[ModuleType]) -> Callable[
         return _wrapper

     return _decorator
+
+
+def logging_exec_time[**P, R](func: Callable[P, R]) -> Callable[P, R]:
+    """Decorator to log the execution time of a function.
+
+    Args:
+        func (Callable): The function to be executed
+
+    Returns:
+        Callable: A decorator that wraps the function to log the execution time.
+    """
+    from time import time
+
+    if iscoroutinefunction(func):
+
+        @wraps(func)
+        async def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+            start_time = time()
+            result = await func(*args, **kwargs)
+            logger.debug(f"Execution time of `{func.__name__}`: {time() - start_time:.2f} s")
+            return result
+
+        return _async_wrapper
+
+    @wraps(func)
+    def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
+        start_time = time()
+        result = func(*args, **kwargs)
+        logger.debug(f"Execution time of {func.__name__}: {(time() - start_time) * 1000:.2f} ms")
+        return result
+
+    return _wrapper
fabricatio/models/action.py
CHANGED
@@ -1,4 +1,8 @@
-"""Module that contains the classes for actions and workflows.
+"""Module that contains the classes for actions and workflows.
+
+This module defines the Action and WorkFlow classes, which are used for
+creating and executing sequences of actions in a task-based context.
+"""

 import traceback
 from abc import abstractmethod
@@ -15,20 +19,27 @@ from pydantic import Field, PrivateAttr


 class Action(HandleTask, ProposeTask, Correct):
-    """Class that represents an action to be executed in a workflow.
+    """Class that represents an action to be executed in a workflow.
+
+    Actions are the atomic units of work in a workflow. Each action performs
+    a specific operation and can modify the shared context data.
+    """

     name: str = Field(default="")
     """The name of the action."""
+
     description: str = Field(default="")
     """The description of the action."""
+
     personality: str = Field(default="")
-    """The personality
+    """The personality traits or context for the action executor."""
+
     output_key: str = Field(default="")
-    """The key
+    """The key used to store this action's output in the context dictionary."""

     @final
     def model_post_init(self, __context: Any) -> None:
-        """Initialize the action by setting
+        """Initialize the action by setting default name and description if not provided.

         Args:
             __context: The context to be used for initialization.
@@ -37,122 +48,185 @@ class Action(HandleTask, ProposeTask, Correct):
         self.description = self.description or self.__class__.__doc__ or ""

     @abstractmethod
-    async def _execute(self, **cxt) -> Any:
-        """Execute the action with the provided arguments.
+    async def _execute(self, *_, **cxt) -> Any:  # noqa: ANN002
+        """Execute the action logic with the provided context arguments.
+
+        This method must be implemented by subclasses to define the actual behavior.

         Args:
             **cxt: The context dictionary containing input and output data.

         Returns:
-            The result of the action execution.
+            Any: The result of the action execution.
         """
         pass

     @final
     async def act(self, cxt: Dict[str, Any]) -> Dict[str, Any]:
-        """Perform the action
+        """Perform the action and update the context with results.

         Args:
             cxt: The context dictionary containing input and output data.
+
+        Returns:
+            Dict[str, Any]: The updated context dictionary.
         """
         ret = await self._execute(**cxt)
+
         if self.output_key:
             logger.debug(f"Setting output: {self.output_key}")
             cxt[self.output_key] = ret
+
         return cxt

     @property
     def briefing(self) -> str:
-        """Return a
+        """Return a formatted description of the action including personality context if available.
+
+        Returns:
+            str: Formatted briefing text with personality and action description.
+        """
         if self.personality:
             return f"## Your personality: \n{self.personality}\n# The action you are going to perform: \n{super().briefing}"
         return f"# The action you are going to perform: \n{super().briefing}"


 class WorkFlow(WithBriefing, ToolBoxUsage):
-    """Class that represents a
+    """Class that represents a sequence of actions to be executed for a task.
+
+    A workflow manages the execution of multiple actions in sequence, passing
+    a shared context between them and handling task lifecycle events.
+    """

     _context: Queue[Dict[str, Any]] = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
-    """
+    """Queue for storing the workflow execution context."""

     _instances: Tuple[Action, ...] = PrivateAttr(default_factory=tuple)
-    """
+    """Instantiated action objects to be executed in this workflow."""

     steps: Tuple[Union[Type[Action], Action], ...] = Field(...)
-    """
+    """The sequence of actions to be executed, can be action classes or instances."""
+
     task_input_key: str = Field(default="task_input")
-    """
+    """Key used to store the input task in the context dictionary."""
+
     task_output_key: str = Field(default="task_output")
-    """
+    """Key used to extract the final result from the context dictionary."""
+
     extra_init_context: Dict[str, Any] = Field(default_factory=dict, frozen=True)
-    """
+    """Additional initial context values to be included at workflow start."""

     def model_post_init(self, __context: Any) -> None:
-        """Initialize the workflow by
+        """Initialize the workflow by instantiating any action classes.

         Args:
             __context: The context to be used for initialization.
         """
-
-        for step in self.steps
-        temp.append(step if isinstance(step, Action) else step())
-        self._instances = tuple(temp)
+        # Convert any action classes to instances
+        self._instances = tuple(step if isinstance(step, Action) else step() for step in self.steps)

     def inject_personality(self, personality: str) -> Self:
-        """
+        """Set the personality for all actions that don't have one defined.

         Args:
-            personality: The personality to
+            personality: The personality text to inject.

         Returns:
-            Self: The
+            Self: The workflow instance for method chaining.
         """
-        for
-
+        for action in filter(lambda a: not a.personality, self._instances):
+            action.personality = personality
         return self

     async def serve(self, task: Task) -> None:
-        """
+        """Execute the workflow to fulfill the given task.
+
+        This method manages the complete lifecycle of processing a task through
+        the workflow's sequence of actions.

         Args:
-            task: The task to be
+            task: The task to be processed.
         """
+        logger.info(f"Start execute workflow: {self.name}")
+
         await task.start()
         await self._init_context(task)
+
         current_action = None
         try:
+            # Process each action in sequence
             for step in self._instances:
-
-
+                current_action = step.name
+                logger.info(f"Executing step: {current_action}")
+
+                # Get current context and execute action
+                context = await self._context.get()
+                act_task = create_task(step.act(context))
+                # Handle task cancellation
                 if task.is_cancelled():
                     act_task.cancel(f"Cancelled by task: {task.name}")
                     break
+
+                # Update context with modified values
                 modified_ctx = await act_task
+                logger.success(f"Step execution finished: {current_action}")
                 await self._context.put(modified_ctx)
-            logger.info(f"Finished executing workflow: {self.name}")

-
+            logger.success(f"Workflow execution finished: {self.name}")
+
+            # Get final context and extract result
+            final_ctx = await self._context.get()
+            result = final_ctx.get(self.task_output_key)
+
+            if self.task_output_key not in final_ctx:
                 logger.warning(
-                    f"Task output key: {self.task_output_key} not found in the context, None will be returned.
+                    f"Task output key: {self.task_output_key} not found in the context, None will be returned. "
+                    f"You can check if `Action.output_key` is set the same as `WorkFlow.task_output_key`."
                 )

-            await task.finish(
-
-
-            logger.
-
+            await task.finish(result)
+
+        except Exception as e:  # noqa: BLE001
+            logger.critical(f"Error during task: {current_action} execution: {e}")
+            logger.critical(traceback.format_exc())
+            await task.fail()

     async def _init_context[T](self, task: Task[T]) -> None:
-        """Initialize the context dictionary for workflow execution.
+        """Initialize the context dictionary for workflow execution.
+
+        Args:
+            task: The task being served by this workflow.
+        """
         logger.debug(f"Initializing context for workflow: {self.name}")
-
+        initial_context = {self.task_input_key: task, **dict(self.extra_init_context)}
+        await self._context.put(initial_context)

     def steps_fallback_to_self(self) -> Self:
-        """
+        """Configure all steps to use this workflow's configuration as fallback.
+
+        Returns:
+            Self: The workflow instance for method chaining.
+        """
         self.hold_to(self._instances)
         return self

     def steps_supply_tools_from_self(self) -> Self:
-        """
+        """Provide this workflow's tools to all steps in the workflow.
+
+        Returns:
+            Self: The workflow instance for method chaining.
+        """
         self.provide_tools_to(self._instances)
         return self
+
+    def update_init_context(self, **kwargs) -> Self:
+        """Update the initial context with additional key-value pairs.
+
+        Args:
+            **kwargs: Key-value pairs to add to the initial context.
+
+        Returns:
+            Self: The workflow instance for method chaining.
+        """
+        self.extra_init_context.update(kwargs)
+        return self