fabricatio 0.2.0__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/__init__.py +37 -0
- fabricatio/_rust.cp312-win_amd64.pyd +0 -0
- fabricatio/_rust.pyi +53 -0
- fabricatio/_rust_instances.py +8 -0
- fabricatio/actions/__init__.py +5 -0
- fabricatio/actions/communication.py +15 -0
- fabricatio/actions/transmission.py +23 -0
- fabricatio/config.py +263 -0
- fabricatio/core.py +167 -0
- fabricatio/decorators.py +178 -0
- fabricatio/fs/__init__.py +5 -0
- fabricatio/fs/curd.py +130 -0
- fabricatio/fs/readers.py +24 -0
- fabricatio/journal.py +28 -0
- fabricatio/models/action.py +139 -0
- fabricatio/models/advanced.py +128 -0
- fabricatio/models/events.py +82 -0
- fabricatio/models/generic.py +124 -0
- fabricatio/models/kwargs_types.py +26 -0
- fabricatio/models/role.py +48 -0
- fabricatio/models/task.py +276 -0
- fabricatio/models/tool.py +187 -0
- fabricatio/models/usages.py +515 -0
- fabricatio/models/utils.py +78 -0
- fabricatio/parser.py +93 -0
- fabricatio/py.typed +0 -0
- fabricatio/toolboxes/__init__.py +17 -0
- fabricatio/toolboxes/arithmetic.py +62 -0
- fabricatio/toolboxes/fs.py +15 -0
- fabricatio/toolboxes/task.py +6 -0
- fabricatio-0.2.0.data/scripts/tdown.exe +0 -0
- fabricatio-0.2.0.dist-info/METADATA +342 -0
- fabricatio-0.2.0.dist-info/RECORD +35 -0
- fabricatio-0.2.0.dist-info/WHEEL +4 -0
- fabricatio-0.2.0.dist-info/licenses/LICENSE +21 -0
fabricatio/decorators.py
ADDED
@@ -0,0 +1,178 @@
"""Decorators for Fabricatio."""

from asyncio import iscoroutinefunction
from functools import wraps
from inspect import signature
from shutil import which
from types import ModuleType
from typing import Callable, List, Optional

from questionary import confirm

from fabricatio.config import configs
from fabricatio.journal import logger


def depend_on_external_cmd[**P, R](
    bin_name: str, install_tip: Optional[str], homepage: Optional[str] = None
) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Decorator to check for the presence of an external command.

    Args:
        bin_name (str): The name of the required binary.
        install_tip (Optional[str]): Installation instructions for the required binary.
        homepage (Optional[str]): The homepage of the required binary.

    Returns:
        Callable[[Callable[P, R]], Callable[P, R]]: A decorator that wraps the function to check for the binary.

    Raises:
        RuntimeError: If the required binary is not found.
    """

    def _decorator(func: Callable[P, R]) -> Callable[P, R]:
        @wraps(func)
        def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            if which(bin_name) is None:
                err = f"`{bin_name}` is required to run {func.__name__}{signature(func)}, please install it and add it to `PATH` first."
                if install_tip is not None:
                    err += f"\nInstall tip: {install_tip}"
                if homepage is not None:
                    err += f"\nHomepage: {homepage}"
                logger.error(err)
                raise RuntimeError(err)
            return func(*args, **kwargs)

        return _wrapper

    return _decorator


def logging_execution_info[**P, R](func: Callable[P, R]) -> Callable[P, R]:
    """Decorator to log the execution of a function.

    Args:
        func (Callable): The function to be executed.

    Returns:
        Callable: A decorator that wraps the function to log the execution.
    """

    @wraps(func)
    def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
        logger.info(f"Executing function: {func.__name__}{signature(func)}\nArgs: {args}\nKwargs: {kwargs}")
        return func(*args, **kwargs)

    return _wrapper


def confirm_to_execute[**P, R](func: Callable[P, R]) -> Callable[P, Optional[R]] | Callable[P, R]:
    """Decorator to confirm before executing a function.

    Args:
        func (Callable): The function to be executed.

    Returns:
        Callable: A decorator that wraps the function to confirm before execution.
    """
    if not configs.general.confirm_on_ops:
        # Skip confirmation if the configuration is set to False
        return func

    if iscoroutinefunction(func):

        @wraps(func)
        async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
            if await confirm(
                f"Are you sure you want to execute function: {func.__name__}{signature(func)} \n📦 Args:{args}\n🔑 Kwargs:{kwargs}\n",
                instruction="Please input [Yes/No] to proceed (default: Yes):",
            ).ask_async():
                return await func(*args, **kwargs)
            logger.warning(f"Function: {func.__name__}{signature(func)} canceled by user.")
            return None

    else:

        @wraps(func)
        def _wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
            if confirm(
                f"Are you sure you want to execute function: {func.__name__}{signature(func)} \n📦 Args:{args}\n🔑 Kwargs:{kwargs}\n",
                instruction="Please input [Yes/No] to proceed (default: Yes):",
            ).ask():
                return func(*args, **kwargs)
            logger.warning(f"Function: {func.__name__}{signature(func)} canceled by user.")
            return None

    return _wrapper


def use_temp_module[**P, R](modules: ModuleType | List[ModuleType]) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Temporarily inject modules into sys.modules during function execution.

    This decorator allows you to temporarily inject one or more modules into sys.modules
    while the decorated function executes. After execution, it restores the original
    state of sys.modules.

    Args:
        modules (ModuleType | List[ModuleType]): A single module or list of modules to
            temporarily inject into sys.modules.

    Returns:
        Callable[[Callable[P, R]], Callable[P, R]]: A decorator that handles temporary
            module injection.

    Examples:
        ```python
        from importlib.machinery import ModuleSpec
        from importlib.util import module_from_spec

        # Create a temporary module
        temp_module = module_from_spec(ModuleSpec("temp_math", None))
        temp_module.pi = 3.14

        # Use the decorator to temporarily inject the module
        @use_temp_module(temp_module)
        def calculate_area(radius: float) -> float:
            from temp_math import pi
            return pi * radius ** 2

        # The temp_module is only available inside the function
        result = calculate_area(5.0)  # Uses temp_module.pi
        ```

        Multiple modules can also be injected:
        ```python
        module1 = module_from_spec(ModuleSpec("mod1", None))
        module2 = module_from_spec(ModuleSpec("mod2", None))

        @use_temp_module([module1, module2])
        def process_data():
            import mod1, mod2
            # Work with temporary modules
            ...
        ```
    """
    module_list = [modules] if isinstance(modules, ModuleType) else modules

    def _decorator(func: Callable[P, R]) -> Callable[P, R]:
        @wraps(func)
        def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            import sys

            # Refuse to shadow modules that are already imported, then inject the temporary ones
            for module in module_list:
                if module.__name__ in sys.modules:
                    raise RuntimeError(
                        f"Module '{module.__name__}' is already present in sys.modules and cannot be overridden."
                    )
                sys.modules[module.__name__] = module

            try:
                return func(*args, **kwargs)
            finally:
                # Restore original state
                for module in module_list:
                    del sys.modules[module.__name__]

        return _wrapper

    return _decorator
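For orientation, a minimal sketch of how these decorators compose; the `pandoc` binary and the `render_report` function below are invented for illustration and are not part of the package:

```python
from fabricatio.decorators import confirm_to_execute, depend_on_external_cmd, logging_execution_info


# Hypothetical example: require the external `pandoc` binary, log every call,
# and (when `confirm_on_ops` is enabled in the config) ask the user before running.
@confirm_to_execute
@logging_execution_info
@depend_on_external_cmd("pandoc", "Install it via your package manager.", "https://pandoc.org")
def render_report(source: str) -> None:
    """Placeholder body; a real function would shell out to pandoc here."""
    print(f"Rendering {source} with pandoc...")
```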
fabricatio/fs/curd.py
ADDED
@@ -0,0 +1,130 @@
"""File system create, update, read, delete operations."""

import shutil
import subprocess
from pathlib import Path
from typing import Union

from fabricatio.decorators import depend_on_external_cmd, logging_execution_info
from fabricatio.journal import logger


@logging_execution_info
def dump_text(path: Union[str, Path], text: str) -> None:
    """Dump text to a file. You need to make sure the file's parent directory exists.

    Args:
        path (str, Path): Path to the file
        text (str): Text to write to the file

    Returns:
        None
    """
    Path(path).write_text(text, encoding="utf-8", errors="ignore")


@logging_execution_info
def copy_file(src: Union[str, Path], dst: Union[str, Path]) -> None:
    """Copy a file from source to destination.

    Args:
        src: Source file path
        dst: Destination file path

    Raises:
        FileNotFoundError: If source file doesn't exist
        shutil.SameFileError: If source and destination are the same
    """
    try:
        shutil.copy(src, dst)
        logger.info(f"Copied file from {src} to {dst}")
    except OSError as e:
        logger.error(f"Failed to copy file from {src} to {dst}: {e!s}")
        raise


@logging_execution_info
def move_file(src: Union[str, Path], dst: Union[str, Path]) -> None:
    """Move a file from source to destination.

    Args:
        src: Source file path
        dst: Destination file path

    Raises:
        FileNotFoundError: If source file doesn't exist
        shutil.SameFileError: If source and destination are the same
    """
    try:
        shutil.move(src, dst)
        logger.info(f"Moved file from {src} to {dst}")
    except OSError as e:
        logger.error(f"Failed to move file from {src} to {dst}: {e!s}")
        raise


@logging_execution_info
def delete_file(file_path: Union[str, Path]) -> None:
    """Delete a file.

    Args:
        file_path: Path to the file to be deleted

    Raises:
        FileNotFoundError: If file doesn't exist
        PermissionError: If no permission to delete the file
    """
    try:
        Path(file_path).unlink()
        logger.info(f"Deleted file: {file_path}")
    except OSError as e:
        logger.error(f"Failed to delete file {file_path}: {e!s}")
        raise


@logging_execution_info
def create_directory(dir_path: Union[str, Path], parents: bool = True, exist_ok: bool = True) -> None:
    """Create a directory.

    Args:
        dir_path: Path to the directory to create
        parents: Create parent directories if they don't exist
        exist_ok: Don't raise error if directory already exists
    """
    try:
        Path(dir_path).mkdir(parents=parents, exist_ok=exist_ok)
        logger.info(f"Created directory: {dir_path}")
    except OSError as e:
        logger.error(f"Failed to create directory {dir_path}: {e!s}")
        raise


@logging_execution_info
@depend_on_external_cmd(
    "erd",
    "Please install `erd` using `cargo install erdtree` or `scoop install erdtree`.",
    "https://github.com/solidiquis/erdtree",
)
def tree(dir_path: Union[str, Path]) -> str:
    """Generate a tree representation of the directory structure. Requires `erd` to be installed."""
    dir_path = Path(dir_path)
    return subprocess.check_output(("erd", dir_path.as_posix()), encoding="utf-8")  # noqa: S603


@logging_execution_info
def delete_directory(dir_path: Union[str, Path]) -> None:
    """Delete a directory and its contents.

    Args:
        dir_path: Path to the directory to delete

    Raises:
        FileNotFoundError: If directory doesn't exist
        OSError: If directory is not empty and can't be removed
    """
    try:
        shutil.rmtree(dir_path)
        logger.info(f"Deleted directory: {dir_path}")
    except OSError as e:
        logger.error(f"Failed to delete directory {dir_path}: {e!s}")
        raise
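As a rough usage sketch of these helpers (the `./workspace` directory and file name below are made up for illustration):

```python
from pathlib import Path

from fabricatio.fs.curd import create_directory, delete_directory, dump_text

workspace = Path("./workspace")                # hypothetical scratch directory
create_directory(workspace)                    # mkdir -p ./workspace
dump_text(workspace / "notes.txt", "hello")    # write UTF-8 text
print((workspace / "notes.txt").read_text())   # -> hello
delete_directory(workspace)                    # rm -rf ./workspace
```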
fabricatio/fs/readers.py
ADDED
@@ -0,0 +1,24 @@
"""Filesystem readers for Fabricatio."""

from pathlib import Path

from magika import Magika

from fabricatio.config import configs

magika = Magika(model_dir=configs.magika.model_dir)


def safe_text_read(path: Path) -> str:
    """Safely read the text from a file.

    Args:
        path (Path): The path to the file.

    Returns:
        str: The text from the file.
    """
    try:
        return path.read_text(encoding="utf-8")
    except (UnicodeDecodeError, IsADirectoryError, FileNotFoundError):
        return ""
fabricatio/journal.py
ADDED
@@ -0,0 +1,28 @@
"""Logging setup for the project."""

import sys

from loguru import logger
from rich import pretty, traceback

from fabricatio.config import configs

pretty.install()
traceback.install()
logger.remove()
logger.add(
    configs.debug.log_file,
    level=configs.debug.log_level,
    rotation=f"{configs.debug.rotation} weeks",
    retention=f"{configs.debug.retention} weeks",
)
logger.add(sys.stderr, level=configs.debug.log_level)


if __name__ == "__main__":
    logger.debug("This is a debug message.")
    logger.info("This is an information message.")
    logger.success("This is a success message.")
    logger.warning("This is a warning message.")
    logger.error("This is an error message.")
    logger.critical("This is a critical message.")
fabricatio/models/action.py
ADDED
@@ -0,0 +1,139 @@
"""Module that contains the classes for actions and workflows."""

import traceback
from abc import abstractmethod
from asyncio import Queue
from typing import Any, Dict, Self, Tuple, Type, Union, Unpack

from fabricatio.journal import logger
from fabricatio.models.advanced import HandleTask, ProposeTask
from fabricatio.models.generic import WithBriefing
from fabricatio.models.task import Task
from fabricatio.models.usages import ToolBoxUsage
from pydantic import Field, PrivateAttr


class Action(HandleTask, ProposeTask):
    """Class that represents an action to be executed in a workflow."""

    personality: str = Field(default="")
    """The personality of whom the action belongs to."""
    output_key: str = Field(default="")
    """The key of the output data."""

    @abstractmethod
    async def _execute(self, **cxt: Unpack) -> Any:
        """Execute the action with the provided arguments.

        Args:
            **cxt: The context dictionary containing input and output data.

        Returns:
            The result of the action execution.
        """
        pass

    async def act(self, cxt: Dict[str, Any]) -> Dict[str, Any]:
        """Perform the action by executing it and setting the output data.

        Args:
            cxt: The context dictionary containing input and output data.
        """
        ret = await self._execute(**cxt)
        if self.output_key:
            logger.debug(f"Setting output: {self.output_key}")
            cxt[self.output_key] = ret
        return cxt

    def briefing(self) -> str:
        """Return a brief description of the action."""
        if self.personality:
            return f"## Your personality: \n{self.personality}\n# The action you are going to perform: \n{super().briefing}"
        return f"# The action you are going to perform: \n{super().briefing}"


class WorkFlow(WithBriefing, ToolBoxUsage):
    """Class that represents a workflow to be executed in a task."""

    _context: Queue[Dict[str, Any]] = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
    """The context dictionary to be used for workflow execution."""

    _instances: Tuple[Action, ...] = PrivateAttr(...)
    """The instances of the workflow steps."""

    steps: Tuple[Union[Type[Action], Action], ...] = Field(...)
    """The steps to be executed in the workflow, actions or action classes."""
    task_input_key: str = Field(default="task_input")
    """The key of the task input data."""
    task_output_key: str = Field(default="task_output")
    """The key of the task output data."""
    extra_init_context: Dict[str, Any] = Field(default_factory=dict, frozen=True)
    """The extra context dictionary to be used for workflow initialization."""

    def model_post_init(self, __context: Any) -> None:
        """Initialize the workflow by instantiating any step classes into action instances.

        Args:
            __context: The context to be used for initialization.
        """
        temp = []
        for step in self.steps:
            temp.append(step if isinstance(step, Action) else step())
        self._instances = tuple(temp)

    def inject_personality(self, personality: str) -> Self:
        """Inject the personality into the workflow's steps.

        Args:
            personality: The personality to be injected.

        Returns:
            Self: The instance of the workflow with the injected personality.
        """
        for a in self._instances:
            if not a.personality:
                a.personality = personality
        return self

    async def serve(self, task: Task) -> None:
        """Serve the task by executing the workflow steps.

        Args:
            task: The task to be served.
        """
        await task.start()
        await self._init_context(task)
        current_action = None
        try:
            for step in self._instances:
                logger.debug(f"Executing step: {step.name}")
                modified_ctx = await step.act(await self._context.get())
                await self._context.put(modified_ctx)
                current_action = step.name
            logger.info(f"Finished executing workflow: {self.name}")
            final_ctx = await self._context.get()
            if self.task_output_key not in final_ctx:
                logger.warning(
                    f"Task output key: {self.task_output_key} not found in the context, None will be returned. You can check if `Action.output_key` is set the same as `WorkFlow.task_output_key`."
                )

            await task.finish(final_ctx.get(self.task_output_key, None))
        except RuntimeError as e:
            logger.error(f"Error during task: {current_action} execution: {e}")  # Log the exception
            logger.error(traceback.format_exc())  # Log the traceback
            await task.fail()  # Mark the task as failed

    async def _init_context[T](self, task: Task[T]) -> None:
        """Initialize the context dictionary for workflow execution."""
        logger.debug(f"Initializing context for workflow: {self.name}")
        await self._context.put({self.task_input_key: task, **dict(self.extra_init_context)})

    def steps_fallback_to_self(self) -> Self:
        """Set the fallback for each step to the workflow itself."""
        self.hold_to(self._instances)
        return self

    def steps_supply_tools_from_self(self) -> Self:
        """Supply the tools from the workflow to each step."""
        self.provide_tools_to(self._instances)
        return self
fabricatio/models/advanced.py
ADDED
@@ -0,0 +1,128 @@
"""A module for advanced models and functionalities."""

from types import CodeType
from typing import Any, Dict, List, Optional, Tuple, Unpack

import orjson
from fabricatio._rust_instances import template_manager
from fabricatio.config import configs
from fabricatio.models.generic import WithBriefing
from fabricatio.models.kwargs_types import LLMKwargs
from fabricatio.models.task import Task
from fabricatio.models.tool import Tool, ToolExecutor
from fabricatio.models.usages import LLMUsage, ToolBoxUsage
from fabricatio.parser import JsonCapture, PythonCapture
from loguru import logger
from pydantic import PositiveInt, ValidationError


class ProposeTask(WithBriefing, LLMUsage):
    """A class that proposes a task based on a prompt."""

    async def propose[T](
        self,
        prompt: str,
        max_validations: PositiveInt = 2,
        **kwargs: Unpack[LLMKwargs],
    ) -> Task[T]:
        """Asynchronously proposes a task based on a given prompt and parameters.

        Args:
            prompt: The prompt text for proposing a task, which is a string that must be provided.
            max_validations: The maximum number of validations allowed; defaults to 2.
            **kwargs: The keyword arguments for the LLM (Large Language Model) usage.

        Returns:
            A Task object based on the proposal result.
        """
        if not prompt:
            err = f"{self.name}: Prompt must be provided."
            logger.error(err)
            raise ValueError(err)

        def _validate_json(response: str) -> None | Task:
            try:
                cap = JsonCapture.capture(response)
                logger.debug(f"Response: \n{response}")
                logger.info(f"Captured JSON: \n{cap}")
                return Task.model_validate_json(cap)
            except ValidationError as e:
                logger.error(f"Failed to parse task from JSON: {e}")
                return None

        template_data = {"prompt": prompt, "json_example": Task.json_example()}
        return await self.aask_validate(
            question=template_manager.render_template(configs.templates.propose_task_template, template_data),
            validator=_validate_json,
            system_message=f"# your personal briefing: \n{self.briefing}",
            max_validations=max_validations,
            **kwargs,
        )


class HandleTask(WithBriefing, ToolBoxUsage):
    """A class that handles a task based on a task object."""

    async def draft_tool_usage_code(
        self,
        task: Task,
        tools: List[Tool],
        data: Dict[str, Any],
        **kwargs: Unpack[LLMKwargs],
    ) -> Tuple[CodeType, List[str]]:
        """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
        logger.info(f"Drafting tool usage code for task: {task.briefing}")

        if not tools:
            err = f"{self.name}: Tools must be provided to draft the tool usage code."
            logger.error(err)
            raise ValueError(err)

        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
                to_extract := JsonCapture.convert_with(response, orjson.loads)
            ):
                return source, to_extract

            return None

        q = template_manager.render_template(
            configs.templates.draft_tool_usage_code_template,
            {
                "data_module_name": configs.toolbox.data_module_name,
                "tool_module_name": configs.toolbox.tool_module_name,
                "task": task.briefing,
                "deps": task.dependencies_prompt,
                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
                "data": data,
            },
        )
        logger.debug(f"Code Drafting Question: \n{q}")
        return await self.aask_validate(
            question=q,
            validator=_validator,
            system_message=f"# your personal briefing: \n{self.briefing}",
            **kwargs,
        )

    async def handle_fin_grind(
        self,
        task: Task,
        data: Dict[str, Any],
        **kwargs: Unpack[LLMKwargs],
    ) -> Optional[Tuple]:
        """Asynchronously handles a task based on a given task object and parameters."""
        logger.info(f"Handling task: \n{task.briefing}")

        tools = await self.gather_tools(task)
        logger.info(f"{self.name} has gathered {[t.name for t in tools]}")

        if tools:
            executor = ToolExecutor(candidates=tools, data=data)
            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)

            cxt = executor.execute(code)
            if to_extract:
                return tuple(cxt.get(k) for k in to_extract)

        return None
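Both classes lean on the same validator contract passed to `aask_validate`: a callable that returns the parsed value on success or `None` to trigger another attempt, up to `max_validations`. A standalone sketch of that contract, mirroring `_validate_json` above:

```python
from typing import Optional

from pydantic import ValidationError

from fabricatio.models.task import Task
from fabricatio.parser import JsonCapture


def validate_task_response(response: str) -> Optional[Task]:
    """Return a Task parsed from an LLM response, or None so the caller retries."""
    try:
        # Extract the JSON payload from the raw response, then validate it into a Task.
        return Task.model_validate_json(JsonCapture.capture(response))
    except ValidationError:
        return None
```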