fabricatio 0.2.6.dev4__cp312-cp312-manylinux_2_34_x86_64.whl → 0.2.6.dev5__cp312-cp312-manylinux_2_34_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/_rust.cpython-312-x86_64-linux-gnu.so +0 -0
- fabricatio/capabilities/covalidate.py +160 -0
- fabricatio/capabilities/task.py +2 -2
- fabricatio/config.py +18 -15
- fabricatio/models/action.py +102 -41
- fabricatio/models/extra.py +1 -1
- fabricatio/models/role.py +32 -7
- fabricatio/models/usages.py +5 -4
- {fabricatio-0.2.6.dev4.data → fabricatio-0.2.6.dev5.data}/scripts/tdown +0 -0
- {fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/METADATA +1 -1
- {fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/RECORD +13 -12
- {fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/WHEEL +0 -0
- {fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/licenses/LICENSE +0 -0
fabricatio/_rust.cpython-312-x86_64-linux-gnu.so
CHANGED
Binary file
fabricatio/capabilities/covalidate.py
ADDED
@@ -0,0 +1,160 @@
+"""Co-validation capability for LLMs."""
+
+from asyncio import gather
+from typing import Callable, List, Optional, Union, Unpack, overload
+
+from fabricatio import TEMPLATE_MANAGER
+from fabricatio.config import configs
+from fabricatio.journal import logger
+from fabricatio.models.kwargs_types import GenerateKwargs
+from fabricatio.models.usages import LLMUsage
+
+
+class CoValidate(LLMUsage):
+    """Class that represents a co-validation capability using multiple LLMs.
+
+    This class provides methods to validate responses by attempting multiple approaches:
+    1. Using the primary LLM to generate a response
+    2. Using a secondary (co-) model to refine responses that fail validation
+    3. Trying multiple times if needed
+    """
+
+    @overload
+    async def aask_covalidate[T](
+        self,
+        question: str,
+        validator: Callable[[str], T | None],
+        co_model: Optional[str] = None,
+        co_temperature: Optional[float] = None,
+        co_top_p: Optional[float] = None,
+        co_max_tokens: Optional[int] = None,
+        max_validations: int = 2,
+        default: None = None,
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> T | None: ...
+
+    @overload
+    async def aask_covalidate[T](
+        self,
+        question: str,
+        validator: Callable[[str], T | None],
+        co_model: Optional[str] = None,
+        co_temperature: Optional[float] = None,
+        co_top_p: Optional[float] = None,
+        co_max_tokens: Optional[int] = None,
+        max_validations: int = 2,
+        default: T = ...,
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> T: ...
+
+    @overload
+    async def aask_covalidate[T](
+        self,
+        question: List[str],
+        validator: Callable[[str], T | None],
+        co_model: Optional[str] = None,
+        co_temperature: Optional[float] = None,
+        co_top_p: Optional[float] = None,
+        co_max_tokens: Optional[int] = None,
+        max_validations: int = 2,
+        default: None = None,
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> List[T | None]: ...
+
+    @overload
+    async def aask_covalidate[T](
+        self,
+        question: List[str],
+        validator: Callable[[str], T | None],
+        co_model: Optional[str] = None,
+        co_temperature: Optional[float] = None,
+        co_top_p: Optional[float] = None,
+        co_max_tokens: Optional[int] = None,
+        max_validations: int = 2,
+        default: T = ...,
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> List[T]: ...
+
+    async def aask_covalidate[T](
+        self,
+        question: Union[str, List[str]],
+        validator: Callable[[str], T | None],
+        co_model: Optional[str] = None,
+        co_temperature: Optional[float] = None,
+        co_top_p: Optional[float] = None,
+        co_max_tokens: Optional[int] = None,
+        max_validations: int = 2,
+        default: Optional[T] = None,
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> Union[T | None, List[T | None]]:
+        """Ask the LLM with co-validation to obtain a validated response.
+
+        This method attempts to generate a response that passes validation using two approaches:
+        1. First, it asks the primary LLM using the original question
+        2. If validation fails, it uses a secondary (co-) model with a template to improve the response
+        3. The process repeats up to max_validations times
+
+        Args:
+            question: String question or list of questions to ask
+            validator: Function that validates responses, returns result or None if invalid
+            co_model: Optional model name for the co-validator
+            co_temperature: Optional temperature setting for the co-validator
+            co_top_p: Optional top_p setting for the co-validator
+            co_max_tokens: Optional maximum tokens for the co-validator response
+            max_validations: Maximum number of validation attempts
+            default: Default value to return if validation fails
+            **kwargs: Additional keyword arguments passed to aask method
+
+        Returns:
+            The validated result (T) or default if validation fails.
+            If input is a list of questions, returns a list of results.
+        """
+
+        async def validate_single_question(q: str) -> Optional[T]:
+            """Process a single question with validation attempts."""
+            validation_kwargs = kwargs.copy()
+
+            for lap in range(max_validations):
+                try:
+                    # First attempt: direct question to primary model
+                    response = await self.aask(question=q, **validation_kwargs)
+                    if response and (validated := validator(response)):
+                        logger.debug(f"Successfully validated the primary response at {lap}th attempt.")
+                        return validated
+
+                    # Second attempt: use co-model with validation template
+                    co_prompt = TEMPLATE_MANAGER.render_template(
+                        configs.templates.co_validation_template,
+                        {"original_q": q, "original_a": response},
+                    )
+                    co_response = await self.aask(
+                        question=co_prompt,
+                        model=co_model,
+                        temperature=co_temperature,
+                        top_p=co_top_p,
+                        max_tokens=co_max_tokens,
+                    )
+
+                    if co_response and (validated := validator(co_response)):
+                        logger.debug(f"Successfully validated the co-response at {lap}th attempt.")
+                        return validated
+
+                except Exception as e:  # noqa: BLE001
+                    logger.error(f"Error during validation: \n{e}")
+                    break
+
+                # Disable caching for subsequent attempts
+                if not validation_kwargs.get("no_cache"):
+                    validation_kwargs["no_cache"] = True
+                    logger.debug("Disabled cache for the next attempt")
+
+            if default is None:
+                logger.error(f"Failed to validate the response after {max_validations} attempts.")
+            return default
+
+        # Handle single question or list of questions
+        if isinstance(question, str):
+            return await validate_single_question(question)
+
+        # Process multiple questions in parallel
+        return await gather(*[validate_single_question(q) for q in question])
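A minimal usage sketch for the new capability: any class mixing in CoValidate (after this release both Action and Role do) gains `aask_covalidate`. The validator and the co-model name below are illustrative assumptions, not part of fabricatio's documented API; a validator returns a value to accept an answer or None to trigger the co-validation round.

from typing import Optional


def pick_yes_no(raw: str) -> Optional[bool]:
    """Return a bool when the answer is unambiguous, otherwise None to trigger co-validation."""
    text = raw.strip().lower()
    if text.startswith("yes"):
        return True
    if text.startswith("no"):
        return False
    return None


async def ask_with_covalidation(agent) -> Optional[bool]:
    # `agent` is assumed to be an Action/Role instance (both inherit CoValidate).
    # On a failed validation the secondary model re-answers via the `co_validation`
    # template, for up to `max_validations` rounds; caching is disabled after a miss.
    return await agent.aask_covalidate(
        "Is the sky blue at noon on a clear day? Answer yes or no.",
        validator=pick_yes_no,
        co_model="gpt-4o-mini",  # illustrative model name
        co_temperature=0.2,
        max_validations=3,
    )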
fabricatio/capabilities/task.py
CHANGED
@@ -84,7 +84,7 @@ class HandleTask(WithBriefing, ToolBoxUsage):
             **self.prepend(cast(Dict[str, Any], kwargs)),
         )
 
-    async def
+    async def handle_fine_grind(
         self,
         task: Task,
         data: Dict[str, Any],
@@ -110,4 +110,4 @@ class HandleTask(WithBriefing, ToolBoxUsage):
 
     async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
         """Asynchronously handles a task based on a given task object and parameters."""
-        return await self.
+        return await self.handle_fine_grind(task, data, **kwargs)
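For callers nothing changes here: `handle` keeps its public signature and now simply forwards to the fully named `handle_fine_grind`. A small call-site sketch; `handler`, `task`, and the data dict are placeholders rather than fabricatio examples.

async def run_handler(handler, task) -> None:
    # HandleTask.handle returns Optional[Tuple] per the signature above;
    # None means no valid handling was produced.
    results = await handler.handle(task, {"target_file": "draft.md"})
    if results is None:
        await task.fail()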
fabricatio/config.py
CHANGED
@@ -48,37 +48,37 @@ class LLMConfig(BaseModel):
     """
 
     model_config = ConfigDict(use_attribute_docstrings=True)
-    api_endpoint: HttpUrl = Field(default=HttpUrl("https://api.openai.com"))
+    api_endpoint: Optional[HttpUrl] = Field(default=HttpUrl("https://api.openai.com"))
     """OpenAI API Endpoint."""
 
-    api_key: SecretStr = Field(default=SecretStr(""))
+    api_key: Optional[SecretStr] = Field(default=SecretStr("sk-setyourkey"))
     """OpenAI API key. Empty by default for security reasons, should be set before use."""
 
-    timeout: PositiveInt = Field(default=300)
+    timeout: Optional[PositiveInt] = Field(default=300)
     """The timeout of the LLM model in seconds. Default is 300 seconds as per request."""
 
-    max_retries: PositiveInt = Field(default=3)
+    max_retries: Optional[PositiveInt] = Field(default=3)
     """The maximum number of retries. Default is 3 retries."""
 
-    model: str = Field(default="gpt-3.5-turbo")
+    model: Optional[str] = Field(default="gpt-3.5-turbo")
    """The LLM model name. Set to 'gpt-3.5-turbo' as per request."""
 
-    temperature: NonNegativeFloat = Field(default=1.0)
+    temperature: Optional[NonNegativeFloat] = Field(default=1.0)
     """The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request."""
 
-    stop_sign: str | List[str] = Field(
+    stop_sign: Optional[str | List[str]] = Field(default="")
     """The stop sign of the LLM model. No default stop sign specified."""
 
-    top_p: NonNegativeFloat = Field(default=0.35)
+    top_p: Optional[NonNegativeFloat] = Field(default=0.35)
     """The top p of the LLM model. Controls diversity via nucleus sampling. Set to 0.35 as per request."""
 
-    generation_count: PositiveInt = Field(default=1)
+    generation_count: Optional[PositiveInt] = Field(default=1)
     """The number of generations to generate. Default is 1."""
 
-    stream: bool = Field(default=False)
+    stream: Optional[bool] = Field(default=False)
     """Whether to stream the LLM model's response. Default is False."""
 
-    max_tokens: PositiveInt = Field(default=8192)
+    max_tokens: Optional[PositiveInt] = Field(default=8192)
     """The maximum number of tokens to generate. Set to 8192 as per request."""
 
     rpm: Optional[PositiveInt] = Field(default=100)
@@ -93,7 +93,7 @@ class EmbeddingConfig(BaseModel):
 
     model_config = ConfigDict(use_attribute_docstrings=True)
 
-    model: str = Field(default="text-embedding-ada-002")
+    model: Optional[str] = Field(default="text-embedding-ada-002")
     """The embedding model name. """
 
     dimensions: Optional[PositiveInt] = Field(default=None)
@@ -102,10 +102,10 @@ class EmbeddingConfig(BaseModel):
     timeout: Optional[PositiveInt] = Field(default=None)
     """The timeout of the embedding model in seconds."""
 
-    max_sequence_length: PositiveInt = Field(default=8192)
+    max_sequence_length: Optional[PositiveInt] = Field(default=8192)
     """The maximum sequence length of the embedding model. Default is 8192 as per request."""
 
-    caching: bool = Field(default=False)
+    caching: Optional[bool] = Field(default=False)
     """Whether to cache the embedding. Default is False."""
 
     api_endpoint: Optional[HttpUrl] = None
@@ -232,6 +232,9 @@ class TemplateConfig(BaseModel):
     correct_template: str = Field(default="correct")
     """The name of the correct template which will be used to correct a string."""
 
+    co_validation_template: str = Field(default="co_validation")
+    """The name of the co-validation template which will be used to co-validate a string."""
+
 
 class MagikaConfig(BaseModel):
     """Magika configuration class."""
@@ -272,7 +275,7 @@ class RagConfig(BaseModel):
 
     model_config = ConfigDict(use_attribute_docstrings=True)
 
-    milvus_uri: HttpUrl = Field(default=HttpUrl("http://localhost:19530"))
+    milvus_uri: Optional[HttpUrl] = Field(default=HttpUrl("http://localhost:19530"))
     """The URI of the Milvus server."""
     milvus_timeout: Optional[PositiveFloat] = Field(default=None)
     """The timeout of the Milvus server."""
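Most of the LLM, embedding, and RAG fields above switch from required to Optional, which reads like a scoped-override setup: a scope may leave a value as None and fall back to another layer (usages.py's LLMUsage already derives from a ScopedConfig). A minimal sketch of that pattern with plain pydantic v2 models; `LLMSettings` and `merged` are illustrative names, not fabricatio's API.

from typing import Optional

from pydantic import BaseModel, Field, PositiveInt


class LLMSettings(BaseModel):
    # Mirrors a few of the fields above; everything optional so a scope can leave it unset.
    model: Optional[str] = Field(default="gpt-3.5-turbo")
    temperature: Optional[float] = Field(default=1.0)
    top_p: Optional[float] = Field(default=0.35)
    max_tokens: Optional[PositiveInt] = Field(default=8192)


def merged(override: LLMSettings, fallback: LLMSettings) -> LLMSettings:
    """Use every non-None value from `override`, falling back to `fallback` otherwise."""
    data = fallback.model_dump()
    data.update({k: v for k, v in override.model_dump().items() if v is not None})
    return LLMSettings(**data)


scoped = LLMSettings(model="gpt-4o", temperature=None)  # temperature left unset on purpose
print(merged(scoped, LLMSettings()))  # model comes from the scope, the rest from the defaults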
fabricatio/models/action.py
CHANGED
@@ -1,4 +1,8 @@
-"""Module that contains the classes for actions and workflows.
+"""Module that contains the classes for actions and workflows.
+
+This module defines the Action and WorkFlow classes, which are used for
+creating and executing sequences of actions in a task-based context.
+"""
 
 import traceback
 from abc import abstractmethod
@@ -6,6 +10,7 @@ from asyncio import Queue, create_task
 from typing import Any, Dict, Self, Tuple, Type, Union, final
 
 from fabricatio.capabilities.correct import Correct
+from fabricatio.capabilities.covalidate import CoValidate
 from fabricatio.capabilities.task import HandleTask, ProposeTask
 from fabricatio.journal import logger
 from fabricatio.models.generic import WithBriefing
@@ -14,21 +19,28 @@ from fabricatio.models.usages import ToolBoxUsage
 from pydantic import Field, PrivateAttr
 
 
-class Action(HandleTask, ProposeTask, Correct):
-    """Class that represents an action to be executed in a workflow.
+class Action(HandleTask, ProposeTask, Correct, CoValidate):
+    """Class that represents an action to be executed in a workflow.
+
+    Actions are the atomic units of work in a workflow. Each action performs
+    a specific operation and can modify the shared context data.
+    """
 
     name: str = Field(default="")
     """The name of the action."""
+
     description: str = Field(default="")
     """The description of the action."""
+
     personality: str = Field(default="")
-    """The personality
+    """The personality traits or context for the action executor."""
+
     output_key: str = Field(default="")
-    """The key
+    """The key used to store this action's output in the context dictionary."""
 
     @final
     def model_post_init(self, __context: Any) -> None:
-        """Initialize the action by setting
+        """Initialize the action by setting default name and description if not provided.
 
         Args:
             __context: The context to be used for initialization.
@@ -38,121 +50,170 @@ class Action(HandleTask, ProposeTask, Correct):
 
     @abstractmethod
     async def _execute(self, **cxt) -> Any:
-        """Execute the action with the provided arguments.
+        """Execute the action logic with the provided context arguments.
+
+        This method must be implemented by subclasses to define the actual behavior.
 
         Args:
             **cxt: The context dictionary containing input and output data.
 
         Returns:
-            The result of the action execution.
+            Any: The result of the action execution.
         """
         pass
 
     @final
     async def act(self, cxt: Dict[str, Any]) -> Dict[str, Any]:
-        """Perform the action
+        """Perform the action and update the context with results.
 
         Args:
             cxt: The context dictionary containing input and output data.
+
+        Returns:
+            Dict[str, Any]: The updated context dictionary.
         """
         ret = await self._execute(**cxt)
+
         if self.output_key:
             logger.debug(f"Setting output: {self.output_key}")
             cxt[self.output_key] = ret
+
         return cxt
 
     @property
     def briefing(self) -> str:
-        """Return a
+        """Return a formatted description of the action including personality context if available.
+
+        Returns:
+            str: Formatted briefing text with personality and action description.
+        """
         if self.personality:
             return f"## Your personality: \n{self.personality}\n# The action you are going to perform: \n{super().briefing}"
         return f"# The action you are going to perform: \n{super().briefing}"
 
 
 class WorkFlow(WithBriefing, ToolBoxUsage):
-    """Class that represents a
+    """Class that represents a sequence of actions to be executed for a task.
+
+    A workflow manages the execution of multiple actions in sequence, passing
+    a shared context between them and handling task lifecycle events.
+    """
 
     _context: Queue[Dict[str, Any]] = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
-    """
+    """Queue for storing the workflow execution context."""
 
     _instances: Tuple[Action, ...] = PrivateAttr(default_factory=tuple)
-    """
+    """Instantiated action objects to be executed in this workflow."""
 
     steps: Tuple[Union[Type[Action], Action], ...] = Field(...)
-    """
+    """The sequence of actions to be executed, can be action classes or instances."""
+
     task_input_key: str = Field(default="task_input")
-    """
+    """Key used to store the input task in the context dictionary."""
+
     task_output_key: str = Field(default="task_output")
-    """
+    """Key used to extract the final result from the context dictionary."""
+
     extra_init_context: Dict[str, Any] = Field(default_factory=dict, frozen=True)
-    """
+    """Additional initial context values to be included at workflow start."""
 
     def model_post_init(self, __context: Any) -> None:
-        """Initialize the workflow by
+        """Initialize the workflow by instantiating any action classes.
 
         Args:
             __context: The context to be used for initialization.
         """
-
-        for step in self.steps
-            temp.append(step if isinstance(step, Action) else step())
-        self._instances = tuple(temp)
+        # Convert any action classes to instances
+        self._instances = tuple(step if isinstance(step, Action) else step() for step in self.steps)
 
     def inject_personality(self, personality: str) -> Self:
-        """
+        """Set the personality for all actions that don't have one defined.
 
         Args:
-            personality: The personality to
+            personality: The personality text to inject.
 
         Returns:
-            Self: The
+            Self: The workflow instance for method chaining.
         """
-        for
-
+        for action in filter(lambda a: not a.personality, self._instances):
+            action.personality = personality
         return self
 
     async def serve(self, task: Task) -> None:
-        """
+        """Execute the workflow to fulfill the given task.
+
+        This method manages the complete lifecycle of processing a task through
+        the workflow's sequence of actions.
 
         Args:
-            task: The task to be
+            task: The task to be processed.
         """
         await task.start()
         await self._init_context(task)
+
         current_action = None
         try:
+            # Process each action in sequence
            for step in self._instances:
-
-
+                current_action = step.name
+                logger.debug(f"Executing step: {current_action}")
+
+                # Get current context and execute action
+                context = await self._context.get()
+                act_task = create_task(step.act(context))
+
+                # Handle task cancellation
                if task.is_cancelled():
                     act_task.cancel(f"Cancelled by task: {task.name}")
                     break
+
+                # Update context with modified values
                modified_ctx = await act_task
                 await self._context.put(modified_ctx)
+
             logger.info(f"Finished executing workflow: {self.name}")
 
-
+            # Get final context and extract result
+            final_ctx = await self._context.get()
+            result = final_ctx.get(self.task_output_key)
+
+            if self.task_output_key not in final_ctx:
                 logger.warning(
-                    f"Task output key: {self.task_output_key} not found in the context, None will be returned.
+                    f"Task output key: {self.task_output_key} not found in the context, None will be returned. "
+                    f"You can check if `Action.output_key` is set the same as `WorkFlow.task_output_key`."
                 )
 
-            await task.finish(
+            await task.finish(result)
+
         except RuntimeError as e:
-            logger.error(f"Error during task: {current_action} execution: {e}")
-            logger.error(traceback.format_exc())
-            await task.fail()
+            logger.error(f"Error during task: {current_action} execution: {e}")
+            logger.error(traceback.format_exc())
+            await task.fail()
 
     async def _init_context[T](self, task: Task[T]) -> None:
-        """Initialize the context dictionary for workflow execution.
+        """Initialize the context dictionary for workflow execution.
+
+        Args:
+            task: The task being served by this workflow.
+        """
         logger.debug(f"Initializing context for workflow: {self.name}")
-
+        initial_context = {self.task_input_key: task, **dict(self.extra_init_context)}
+        await self._context.put(initial_context)
 
     def steps_fallback_to_self(self) -> Self:
-        """
+        """Configure all steps to use this workflow's configuration as fallback.
+
+        Returns:
+            Self: The workflow instance for method chaining.
+        """
         self.hold_to(self._instances)
         return self
 
     def steps_supply_tools_from_self(self) -> Self:
-        """
+        """Provide this workflow's tools to all steps in the workflow.
+
+        Returns:
+            Self: The workflow instance for method chaining.
+        """
         self.provide_tools_to(self._instances)
         return self
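The Action/WorkFlow pair above implies a small authoring pattern: subclass Action, implement `_execute`, and compose classes or instances into a WorkFlow whose `task_output_key` picks the final result out of the shared context. A sketch under those assumptions; the `Summarize` action, its keys, and the workflow name are illustrative rather than taken from fabricatio's docs.

from fabricatio.models.action import Action, WorkFlow


class Summarize(Action):
    """Hypothetical action: reads the task from the context and stores a summary."""

    output_key: str = "task_output"  # matches WorkFlow.task_output_key's default

    async def _execute(self, task_input=None, **cxt):
        # `task_input` is placed into the context by WorkFlow._init_context
        return f"summary of: {task_input}"


workflow = (
    WorkFlow(name="summarize_flow", steps=(Summarize,))
    .inject_personality("a concise analyst")  # only fills actions without a personality
    .steps_fallback_to_self()                 # steps fall back to this workflow's configuration
)
# `await workflow.serve(task)` starts the task, runs the steps in order, and finishes
# the task with whatever value ended up under "task_output".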
fabricatio/models/extra.py
CHANGED
@@ -26,7 +26,7 @@ class Figure(Base):
     """The caption accompanying the figure, summarizing its main points and academic value."""
 
     figure_path: str
-    """The
+    """The exact path to the figure file, must exist in the file system, SHALL never be a PLACEHOLDER."""
 
 
 class Highlightings(Base):
fabricatio/models/role.py
CHANGED
@@ -1,8 +1,9 @@
-"""Module that contains the Role class."""
+"""Module that contains the Role class for managing workflows and their event registrations."""
 
 from typing import Any, Self, Set
 
 from fabricatio.capabilities.correct import Correct
+from fabricatio.capabilities.covalidate import CoValidate
 from fabricatio.capabilities.task import HandleTask, ProposeTask
 from fabricatio.core import env
 from fabricatio.journal import logger
@@ -12,20 +13,37 @@ from fabricatio.models.tool import ToolBox
 from pydantic import Field
 
 
-class Role(ProposeTask, HandleTask, Correct):
-    """Class that represents a role with a registry of events and workflows.
+class Role(ProposeTask, HandleTask, Correct, CoValidate):
+    """Class that represents a role with a registry of events and workflows.
+
+    A Role serves as a container for workflows, managing their registration to events
+    and providing them with shared configuration like tools and personality.
+
+    Attributes:
+        registry: Mapping of events to workflows that handle them
+        toolboxes: Set of toolboxes available to this role and its workflows
+    """
 
     registry: dict[Event | str, WorkFlow] = Field(default_factory=dict)
-    """
+    """The registry of events and workflows."""
 
     toolboxes: Set[ToolBox] = Field(default_factory=set)
+    """Collection of tools available to this role."""
 
     def model_post_init(self, __context: Any) -> None:
-        """
+        """Initialize the role by resolving configurations and registering workflows.
+
+        Args:
+            __context: The context used for initialization
+        """
         self.resolve_configuration().register_workflows()
 
     def register_workflows(self) -> Self:
-        """Register
+        """Register each workflow in the registry to its corresponding event in the event bus.
+
+        Returns:
+            Self: The role instance for method chaining
+        """
         for event, workflow in self.registry.items():
             logger.debug(
                 f"Registering workflow: `{workflow.name}` for event: `{Event.instantiate_from(event).collapse()}`"
@@ -34,7 +52,14 @@ class Role(ProposeTask, HandleTask, Correct):
         return self
 
     def resolve_configuration(self) -> Self:
-        """
+        """Apply role-level configuration to all workflows in the registry.
+
+        This includes setting up fallback configurations, injecting personality traits,
+        and providing tool access to workflows and their steps.
+
+        Returns:
+            Self: The role instance for method chaining
+        """
         for workflow in self.registry.values():
             logger.debug(f"Resolving config for workflow: `{workflow.name}`")
             (
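Reusing the `workflow` from the sketch above, a Role ties it to an event at construction time; `model_post_init` then calls `resolve_configuration().register_workflows()` automatically. The role name and event string below are illustrative.

from fabricatio.models.role import Role

role = Role(
    name="summarizer",                      # assumes the usual WithBriefing-style `name` field
    registry={"task.summarize": workflow},  # plain strings are accepted alongside Event objects
)
# Once constructed, the workflow is registered on the event bus; dispatching that event
# with a Task is what ultimately drives WorkFlow.serve (the dispatch call itself is not
# part of this diff).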
fabricatio/models/usages.py
CHANGED
@@ -213,7 +213,7 @@ class LLMUsage(ScopedConfig):
         self,
         question: str,
         validator: Callable[[str], T | None],
-        default: T
+        default: T = ...,
         max_validations: PositiveInt = 2,
         **kwargs: Unpack[GenerateKwargs],
     ) -> T: ...
@@ -222,7 +222,7 @@ class LLMUsage(ScopedConfig):
         self,
         question: List[str],
         validator: Callable[[str], T | None],
-        default: T
+        default: T = ...,
         max_validations: PositiveInt = 2,
         **kwargs: Unpack[GenerateKwargs],
     ) -> List[T]: ...
@@ -277,8 +277,9 @@ class LLMUsage(ScopedConfig):
            except Exception as e:  # noqa: BLE001
                 logger.error(f"Error during validation: \n{e}")
                 break
-            kwargs
-
+            if not kwargs.get("no_cache"):
+                kwargs["no_cache"] = True
+                logger.debug("Closed the cache for the next attempt")
         if default is None:
             logger.error(f"Failed to validate the response after {max_validations} attempts.")
         return default
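The retyped overloads (`default: T = ...`) and the new no_cache retry read most naturally at a call site. A short sketch, assuming the validating method on LLMUsage is `aask_validate` (its name is not visible in these hunks) and using an illustrative JSON validator.

import json
from typing import Optional


def parse_json(raw: str) -> Optional[dict]:
    """Return the parsed object, or None so the next attempt runs (now with no_cache=True)."""
    try:
        obj = json.loads(raw)
    except json.JSONDecodeError:
        return None
    return obj if isinstance(obj, dict) else None


async def demo(llm) -> dict:
    # Without a default the result is Optional[dict]; with `default=` the overload
    # carrying `default: T = ...` narrows the annotation to dict.
    maybe: Optional[dict] = await llm.aask_validate(question="Reply with a JSON object.", validator=parse_json)
    surely: dict = await llm.aask_validate(question="Reply with a JSON object.", validator=parse_json, default={})
    return maybe or surely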
{fabricatio-0.2.6.dev4.data → fabricatio-0.2.6.dev5.data}/scripts/tdown
CHANGED
Binary file
{fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
-fabricatio-0.2.6.
-fabricatio-0.2.6.
-fabricatio-0.2.6.
+fabricatio-0.2.6.dev5.dist-info/METADATA,sha256=fDyrjLOMA7hb-NFEGY3jM5sMjkDDfHQ-PIYRHqjQvO0,13693
+fabricatio-0.2.6.dev5.dist-info/WHEEL,sha256=RIvmwLDYujv60MYBx2jxyP4vdn1DD7X0kBgz1TQvZuc,108
+fabricatio-0.2.6.dev5.dist-info/licenses/LICENSE,sha256=yDZaTLnOi03bi3Dk6f5IjhLUc5old2yOsihHWU0z-i0,1067
 fabricatio/decorators.py,sha256=cJHsxxbnMhc4SzPl4454CPLuDP3H0qbTrzV_U2rLPrs,6372
 fabricatio/core.py,sha256=MaEKZ6DDmbdScAY-7F1gwGA6fr7ADX6Mz5rNVi2msFA,6277
 fabricatio/models/generic.py,sha256=WxT4KBGGZTpqGPSPVwD5mkmhYBjxggZ7n-HKi-Hed4M,13619
 fabricatio/models/tool.py,sha256=ATwbOyvOTzrfAKcbOmCqdG3je4-T5jrM6FIw4cDPRDY,6863
-fabricatio/models/role.py,sha256=
-fabricatio/models/extra.py,sha256=
+fabricatio/models/role.py,sha256=m-orT8xtiI9t0MvCzwVnfnPXPoocS2z6Twaim_nSmNE,2780
+fabricatio/models/extra.py,sha256=2TyrlYOfTe-Z-9eZxV8K7lXQLHz3y5Ze97JYZnnqg9U,7219
 fabricatio/models/kwargs_types.py,sha256=7MjoTtGfSUx4jws_DlvK2ud7au6Y2z50Umr3PFtmSTc,4435
 fabricatio/models/utils.py,sha256=KmsTQcBCTYgnsZz7U1ECSfLRdswWPkKtGg8mBMaXrwA,4850
-fabricatio/models/usages.py,sha256=
+fabricatio/models/usages.py,sha256=SvfASbO_nqbZ1fvn-dTMldjQetFcin3ADwVC8xmN-nQ,28019
 fabricatio/models/events.py,sha256=UvOc6V3vfjKuvh7irDezJ8EGpsNo5yzLdq4xQexVonw,4063
 fabricatio/models/task.py,sha256=-EnzpEyM6Z687gF1lPcmA2szEUw6dFpu3lOtseaz95o,10193
-fabricatio/models/action.py,sha256=
+fabricatio/models/action.py,sha256=C8zyrZbJdMmspJFFbsoXK1B5R0WCbY46G3CLAarxnWo,8062
 fabricatio/toolboxes/fs.py,sha256=OQMdeokYxSNVrCZJAweJ0cYiK4k2QuEiNdIbS5IHIV8,705
 fabricatio/toolboxes/__init__.py,sha256=dYm_Gd8XolSU_h4wnkA09dlaLDK146eeFz0CUgPZ8_c,380
 fabricatio/toolboxes/arithmetic.py,sha256=sSTPkKI6-mb278DwQKFO9jKyzc9kCx45xNH7V6bGBpE,1307
@@ -20,7 +20,7 @@ fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fabricatio/fs/readers.py,sha256=5bLlpqcdhIwWfysh7gvfVv0PPPVAeDlTPGwNTio6j9M,1156
 fabricatio/fs/curd.py,sha256=FuG75qco4dX8vhIK27gKz9rKUXbWHOFg5yK3nGLB25s,4469
 fabricatio/fs/__init__.py,sha256=hTuYtzmvIGtbg7PTdoqLEQJ0E63hOzZltCIrLlDKaSE,559
-fabricatio/config.py,sha256=
+fabricatio/config.py,sha256=vRCDnCyBgQoNCiwMEVdlD8_ij9Q7rykdpsod3XHtD-8,16191
 fabricatio/journal.py,sha256=Op0wC-JlZumnAc_aDmYM4ljnSNLoKEEMfcIRbCF69ow,455
 fabricatio/__init__.py,sha256=6EjK4SxbnvFxdO9ftkXD9rxSuoPEIITNzUkuMO9s3yU,1092
 fabricatio/actions/output.py,sha256=wNyLNxjqBlms0hyxap8XUPgN53izipJrCOtpX6aluFQ,626
@@ -32,11 +32,12 @@ fabricatio/workflows/rag.py,sha256=uOZXprD479fUhLA6sYvEM8RWcVcUZXXtP0xRbTMPdHE,5
 fabricatio/parser.py,sha256=Jr2ELtcmiRNAyz76TCWoJuUpG7zrJoRn3GfaX9vZSJM,6099
 fabricatio/capabilities/correct.py,sha256=BiLEAk6e1KbwUMhTexmDfgtlPUct_bG0igDK7CwHqao,5107
 fabricatio/capabilities/rag.py,sha256=ghctqjIf6KDe6PP8-SDzKN1zxh94rXk5Y5hHFtG_46Y,15404
+fabricatio/capabilities/covalidate.py,sha256=uEpZNpzsQoIM5ZP4sntUhBveNhhqrAzQ6Q9-2WDlpPg,6461
 fabricatio/capabilities/rating.py,sha256=ZQrKKmmIgnN4zgNnG_GmWa5Nyxpk03JYW32RJ4R5vvQ,14067
 fabricatio/capabilities/review.py,sha256=TX7av4b2N7MRDHMowsIZfiujXRRNxjUMNHtCFVA1UTM,10824
 fabricatio/capabilities/propose.py,sha256=4QvONVVUp1rs34Te2Rjams6NioEt6FhEAxDWiveQnSg,1544
-fabricatio/capabilities/task.py,sha256=
+fabricatio/capabilities/task.py,sha256=llFFKh8MAaTjsp8DtAGD_UUONROfFNxorh6NLys973U,4496
 fabricatio/_rust.pyi,sha256=1TvnaXK_QKM8Et05LkZ_vOGR4WISVd9X8lU6OTwFFaU,3376
-fabricatio/_rust.cpython-312-x86_64-linux-gnu.so,sha256=
-fabricatio-0.2.6.
-fabricatio-0.2.6.
+fabricatio/_rust.cpython-312-x86_64-linux-gnu.so,sha256=Qv4F28dducNx2GS8L8TmMWuk3MgBivQbzH_UmC-1P2w,1911376
+fabricatio-0.2.6.dev5.data/scripts/tdown,sha256=h7dazHQEgymw8fXo1ROyyUfwLMMw7l4JyMlt2xJyN-4,4576688
+fabricatio-0.2.6.dev5.dist-info/RECORD,,
{fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/WHEEL
File without changes
{fabricatio-0.2.6.dev4.dist-info → fabricatio-0.2.6.dev5.dist-info}/licenses/LICENSE
File without changes