fabricatio 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/__init__.py +18 -0
- fabricatio/config.py +122 -0
- fabricatio/core.py +148 -0
- fabricatio/fs.py +1 -0
- fabricatio/logger.py +16 -0
- fabricatio/models/action.py +22 -0
- fabricatio/models/events.py +68 -0
- fabricatio/models/generic.py +310 -0
- fabricatio/models/role.py +14 -0
- fabricatio/models/tool.py +80 -0
- fabricatio/models/utils.py +81 -0
- fabricatio/py.typed +0 -0
- fabricatio-0.1.0.dist-info/METADATA +46 -0
- fabricatio-0.1.0.dist-info/RECORD +16 -0
- fabricatio-0.1.0.dist-info/WHEEL +4 -0
- fabricatio-0.1.0.dist-info/licenses/LICENSE +21 -0
fabricatio/__init__.py
ADDED
@@ -0,0 +1,18 @@
from fabricatio.core import Env
from fabricatio.logger import logger
from fabricatio.models.action import Action, WorkFlow
from fabricatio.models.events import Event
from fabricatio.models.role import Role
from fabricatio.models.tool import ToolBox
from fabricatio.models.utils import Messages

__all__ = [
    "Env",
    "logger",
    "Action",
    "Event",
    "Messages",
    "Role",
    "ToolBox",
    "WorkFlow",
]

fabricatio/config.py
ADDED
@@ -0,0 +1,122 @@
from typing import Literal

from appdirs import user_config_dir
from pydantic import BaseModel, HttpUrl, SecretStr, PositiveInt, NonNegativeFloat, Field, FilePath
from pydantic_settings import (
    BaseSettings,
    SettingsConfigDict,
    PydanticBaseSettingsSource,
    TomlConfigSettingsSource,
    PyprojectTomlConfigSettingsSource,
    EnvSettingsSource,
    DotEnvSettingsSource,
)


class LLMConfig(BaseModel):
    api_endpoint: HttpUrl = Field(default=HttpUrl("https://api.openai.com"))
    """
    OpenAI API Endpoint.
    """

    api_key: SecretStr = Field(default=SecretStr(""))
    """
    OpenAI API key. Empty by default for security reasons; it should be set before use.
    """

    timeout: PositiveInt = Field(default=300)
    """
    The timeout of the LLM model in seconds. Default is 300 seconds.
    """

    max_retries: PositiveInt = Field(default=3)
    """
    The maximum number of retries. Default is 3 retries.
    """

    model: str = Field(default="gpt-3.5-turbo")
    """
    The LLM model name. Defaults to 'gpt-3.5-turbo'.
    """

    temperature: NonNegativeFloat = Field(default=1.0)
    """
    The temperature of the LLM model. Controls randomness in generation. Defaults to 1.0.
    """

    stop_sign: str = Field(default="")
    """
    The stop sign of the LLM model. No default stop sign specified.
    """

    top_p: NonNegativeFloat = Field(default=0.35)
    """
    The top p of the LLM model. Controls diversity via nucleus sampling. Defaults to 0.35.
    """

    generation_count: PositiveInt = Field(default=1)
    """
    The number of generations to generate. Default is 1.
    """

    stream: bool = Field(default=False)
    """
    Whether to stream the LLM model's response. Default is False.
    """

    max_tokens: PositiveInt = Field(default=8192)
    """
    The maximum number of tokens to generate. Defaults to 8192.
    """


class DebugConfig(BaseModel):
    log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
    """
    The log level of the application.
    """

    log_file: FilePath = Field(default=f"{user_config_dir("fabricatio", roaming=True)}.log")
    """
    The log file of the application.
    """


class Settings(BaseSettings):
    model_config = SettingsConfigDict(
        env_prefix="FABRIK_",
        env_nested_delimiter="__",
        pyproject_toml_depth=1,
        toml_file=["fabricatio.toml", f"{user_config_dir("fabricatio", roaming=True)}.toml"],
        env_file=[".env", ".envrc"],
        use_attribute_docstrings=True,
    )

    llm: LLMConfig = Field(default_factory=LLMConfig)
    """
    LLM Configuration
    """

    debug: DebugConfig = Field(default_factory=DebugConfig)
    """
    Debug Configuration
    """

    @classmethod
    def settings_customise_sources(
        cls,
        settings_cls: type[BaseSettings],
        init_settings: PydanticBaseSettingsSource,
        env_settings: PydanticBaseSettingsSource,
        dotenv_settings: PydanticBaseSettingsSource,
        file_secret_settings: PydanticBaseSettingsSource,
    ) -> tuple[PydanticBaseSettingsSource, ...]:
        return (
            DotEnvSettingsSource(settings_cls),
            EnvSettingsSource(settings_cls),
            TomlConfigSettingsSource(settings_cls),
            PyprojectTomlConfigSettingsSource(settings_cls),
        )


configs: Settings = Settings()

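For orientation, a minimal usage sketch (not part of the package diff): Settings pulls values from FABRIK_-prefixed environment variables (nested fields separated by __), .env/.envrc files, fabricatio.toml, and pyproject.toml, so the default model could be overridden roughly like this, assuming the package is installed:

# Illustrative sketch only: overriding a nested setting through the environment
# before the configuration is constructed. The chosen model name is arbitrary.
import os

os.environ["FABRIK_LLM__MODEL"] = "gpt-4o-mini"  # hypothetical override of llm.model

from fabricatio.config import Settings

settings = Settings()
print(settings.llm.model)         # expected to reflect the override
print(settings.llm.api_endpoint)  # defaults to https://api.openai.com
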
fabricatio/core.py
ADDED
@@ -0,0 +1,148 @@
from typing import Callable, Self, overload

from pydantic import BaseModel, ConfigDict, PrivateAttr
from pymitter import EventEmitter

from fabricatio.models.events import Event


class Env(BaseModel):
    """
    Environment class that manages event handling using EventEmitter.

    Attributes:
        _ee (EventEmitter): Private attribute for event handling.
    """

    model_config = ConfigDict(use_attribute_docstrings=True)
    _ee: EventEmitter = PrivateAttr(default_factory=EventEmitter)

    @overload
    def on(self, event: str | Event, /, ttl: int = -1) -> Self:
        """
        Registers an event listener that listens indefinitely or for a specified number of times.

        Args:
            event (str | Event): The event to listen for.
            ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.

        Returns:
            Self: The current instance of Env.
        """
        ...

    @overload
    def on[**P, R](
        self, event: str | Event, func: Callable[P, R] = None, /, ttl: int = -1
    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
        """
        Registers an event listener with a specific function that listens indefinitely or for a specified number of times.

        Args:
            event (str | Event): The event to listen for.
            func (Callable[P, R]): The function to be called when the event is emitted.
            ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.

        Returns:
            Callable[[Callable[P, R]], Callable[P, R]]: A decorator that registers the function as an event listener.
        """
        ...

    def on[**P, R](
        self,
        event: str | Event,
        func: Callable[P, R] = None,
        /,
        ttl=-1,
    ) -> Callable[[Callable[P, R]], Callable[P, R]] | Self:
        """
        Registers an event listener with a specific function that listens indefinitely or for a specified number of times.

        Args:
            event (str | Event): The event to listen for.
            func (Callable[P, R]): The function to be called when the event is emitted.
            ttl (int): Time-to-live for the listener. If -1, the listener will listen indefinitely.

        Returns:
            Callable[[Callable[P, R]], Callable[P, R]] | Self: A decorator that registers the function as an event listener, or the current instance of Env.
        """
        if isinstance(event, Event):
            event = event.collapse()
        if func is None:
            return self._ee.on(event, ttl=ttl)
        else:
            self._ee.on(event, func, ttl=ttl)
            return self

    @overload
    def once[**P, R](
        self,
        event: str | Event,
    ) -> Callable[[Callable[P, R]], Callable[P, R]]:
        """
        Registers an event listener that listens only once.

        Args:
            event (str | Event): The event to listen for.

        Returns:
            Callable[[Callable[P, R]], Callable[P, R]]: A decorator that registers the function as an event listener.
        """
        ...

    @overload
    def once[**P, R](self, event: str | Event, func: Callable[[Callable[P, R]], Callable[P, R]]) -> Self:
        """
        Registers an event listener with a specific function that listens only once.

        Args:
            event (str | Event): The event to listen for.
            func (Callable[P, R]): The function to be called when the event is emitted.

        Returns:
            Self: The current instance of Env.
        """
        ...

    def once[**P, R](
        self, event: str | Event, func: Callable[P, R] = None
    ) -> Callable[[Callable[P, R]], Callable[P, R]] | Self:
        """
        Registers an event listener that listens only once.

        Args:
            event (str | Event): The event to listen for.
            func (Callable[P, R]): The function to be called when the event is emitted.

        Returns:
            Callable[[Callable[P, R]], Callable[P, R]] | Self: A decorator that registers the function as an event listener, or the current instance of Env.
        """
        if isinstance(event, Event):
            event = event.collapse()
        if func is None:
            return self._ee.once(event)
        else:
            self._ee.once(event, func)
            return self

    def emit[**P](self, event: str | Event, *args: P.args, **kwargs: P.kwargs) -> None:
        """
        Emits an event to all registered listeners.

        Args:
            event (str | Event): The event to emit.
            *args: Positional arguments to pass to the listeners.
            **kwargs: Keyword arguments to pass to the listeners.
        """
        self._ee.emit(event, *args, **kwargs)

    async def emit_async[**P](self, event: str | Event, *args: P.args, **kwargs: P.kwargs) -> None:
        """
        Asynchronously emits an event to all registered listeners.

        Args:
            event (str | Event): The event to emit.
            *args: Positional arguments to pass to the listeners.
            **kwargs: Keyword arguments to pass to the listeners.
        """
        return await self._ee.emit_async(event, *args, **kwargs)

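A hedged usage sketch (not part of the package diff): when no function is passed, on() returns pymitter's decorator, so listeners can be registered either way and then triggered with emit:

# Illustrative sketch only: registering a listener and emitting an event.
from fabricatio import Env, Event

env = Env()

@env.on(Event.from_string("task.finished"))  # decorator form, since no func is passed
def handle_finished(payload: str) -> None:
    print(f"finished: {payload}")

env.emit("task.finished", "demo payload")
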
fabricatio/fs.py
ADDED
@@ -0,0 +1 @@
# TODO: fs capabilities impl

fabricatio/logger.py
ADDED
@@ -0,0 +1,16 @@
from loguru import logger
from rich import traceback

from fabricatio.config import configs

traceback.install()
logger.level(configs.debug.log_level)
logger.add(configs.debug.log_file, rotation="1 weeks", retention="1 month", compression="zip")

if __name__ == "__main__":
    logger.debug("This is a trace message.")
    logger.info("This is an information message.")
    logger.success("This is a success message.")
    logger.warning("This is a warning message.")
    logger.error("This is an error message.")
    logger.critical("This is a critical message.")

fabricatio/models/action.py
ADDED
@@ -0,0 +1,22 @@
from abc import abstractmethod
from typing import Tuple

from pydantic import Field

from fabricatio.models.generic import WithBriefing, LLMUsage


class Action(WithBriefing, LLMUsage):

    @abstractmethod
    async def execute(self, *args, **kwargs):
        pass


class WorkFlow(WithBriefing, LLMUsage):
    steps: Tuple[Action, ...] = Field(default=())

    async def execute(self, *args, **kwargs):
        # TODO dispatch params to each step according to the step's signature
        for step in self.steps:
            await step.execute(*args, **kwargs)

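A minimal sketch of how these classes compose (not part of the package diff; Greet is a hypothetical action):

# Illustrative sketch only: a concrete Action subclass wired into a WorkFlow.
from fabricatio import Action, WorkFlow


class Greet(Action):  # hypothetical action
    async def execute(self, *args, **kwargs):
        print(f"Hello from {self.briefing}")


flow = WorkFlow(name="demo", description="Greets once.", steps=(Greet(name="greet"),))
# `await flow.execute()` runs each step in order, e.g. inside asyncio.run(...).
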
fabricatio/models/events.py
ADDED
@@ -0,0 +1,68 @@
from typing import List, Self

from pydantic import BaseModel, ConfigDict, Field


class Event(BaseModel):
    model_config = ConfigDict(use_attribute_docstrings=True)
    delimiter: str = Field(default=".", frozen=True)
    """The delimiter used to separate the event name into segments."""

    segments: List[str] = Field(default_factory=list, frozen=True)
    """The segments of the namespaces."""

    @classmethod
    def from_string(cls, event: str, delimiter: str = ".") -> Self:
        """
        Create an Event instance from a string.

        Args:
            event (str): The event string.
            delimiter (str): The delimiter used to separate the event name into segments.

        Returns:
            Event: The Event instance.
        """
        return cls(delimiter=delimiter, segments=event.split(delimiter))

    def collapse(self) -> str:
        """
        Collapse the event into a string.
        """
        return self.delimiter.join(self.segments)

    def clone(self) -> Self:
        """
        Clone the event.
        """
        return Event(delimiter=self.delimiter, segments=[segment for segment in self.segments])

    def push(self, segment: str) -> Self:
        """
        Push a segment to the event.
        """
        assert segment, "The segment must not be empty."
        assert self.delimiter not in segment, "The segment must not contain the delimiter."

        self.segments.append(segment)
        return self

    def pop(self) -> str:
        """
        Pop a segment from the event.
        """
        return self.segments.pop()

    def clear(self) -> Self:
        """
        Clear the event.
        """
        self.segments.clear()
        return self

    def concat(self, event: Self) -> Self:
        """
        Concatenate another event to this event.
        """
        self.segments.extend(event.segments)
        return self

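A short sketch of the Event API in use (not part of the package diff):

# Illustrative sketch only: building, extending, and collapsing an Event.
from fabricatio import Event

event = Event.from_string("fabricatio.task.finished")
print(event.segments)                        # ['fabricatio', 'task', 'finished']
print(event.clone().push("ok").collapse())   # 'fabricatio.task.finished.ok'
print(event.collapse())                      # original is unchanged: 'fabricatio.task.finished'
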
fabricatio/models/generic.py
ADDED
@@ -0,0 +1,310 @@
from asyncio import Queue
from typing import Iterable, Any, Dict, Self, List

import litellm
from litellm.types.utils import StreamingChoices, ModelResponse, Choices
from pydantic import (
    BaseModel,
    Field,
    PositiveInt,
    NonNegativeInt,
    ConfigDict,
    HttpUrl,
    SecretStr,
    NonNegativeFloat,
    PrivateAttr,
)

from fabricatio.config import configs
from fabricatio.models.utils import Messages


class Base(BaseModel):
    model_config = ConfigDict(use_attribute_docstrings=True)


class WithToDo(Base):
    _todo: Queue[str] = PrivateAttr(default_factory=Queue)
    """
    The todo list of the current instance.
    """

    async def add_todo(self, todo_msg: str) -> Self:
        """
        Add a todo item to the todo list.

        Args:
            todo_msg: The todo item to be added to the todo list.

        Returns:
            Self: The current instance object to support method chaining.
        """
        await self._todo.put(todo_msg)
        return self

    async def get_todo(self) -> str:
        """
        Get the last todo item from the todo list.

        Returns:
            str: The last todo item from the todo list.
        """
        # Pop the last todo item from the todo list
        return await self._todo.get()


class Named(Base):
    name: str = Field(frozen=True)
    """
    Name of the object.
    """


class Described(Base):
    description: str = Field(default="", frozen=True)
    """
    Description of the object.
    """


class WithBriefing(Named, Described):

    @property
    def briefing(self) -> str:
        """
        Get the briefing of the object.

        Returns:
            str: The briefing of the object.
        """
        return f"{self.name}: {self.description}" if self.description else self.name


class Memorable(Base):
    memory: List[str] = Field(default_factory=list)
    """
    Memory list.
    """
    memory_max_size: NonNegativeInt = Field(default=0)
    """
    Maximum size of the memory list.
    """

    def add_memory(self, memories: str | Iterable[str]) -> Self:
        """
        Add memory items to the memory list.

        Parameters:
            - memories: str | Iterable[str] - A single memory item as a string or multiple memory items as an iterable.

        Returns:
            - The current instance object, to support method chaining.
        """
        # Convert a single memory item to a list
        if isinstance(memories, str):
            memories = [memories]
        # Add memory items to the memory list
        self.memory.extend(memories)
        # Limit the memory list size if the maximum size is set
        if self.memory_max_size > 0:
            self.memory = self.memory[-self.memory_max_size:]
        # Return the current instance object to support method chaining
        return self

    def top_memories(self, n: PositiveInt = 1) -> List[str]:
        """
        Get the top memory items from the memory list.

        Parameters:
            - n: PositiveInt - The number of top memory items to return.

        Returns:
            - List[str] - The top memory items from the memory list.
        """
        # Get the top memory items from the memory list
        return self.memory[-n:]

    def top_memories_as_string(self, n: PositiveInt = 1, separator: str = "\n\n") -> str:
        """
        Get the memory items as a string.

        Parameters:
            - n: PositiveInt - The number of memory items to return.
            - separator: str - The separator to join memory items.

        Returns:
            - str - The memory items as a string.
        """
        # Get the top memory items from the memory list
        memories = self.top_memories(n)
        # Join memory items with the separator
        return separator.join(memories)

    def clear_memories(self) -> Self:
        """
        Clear all memory items.

        Returns:
            - The current instance object, to support method chaining.
        """
        # Clear all memory items from the memory list
        self.memory.clear()
        # Return the current instance object to support method chaining
        return self


class LLMUsage(Base):
    llm_api_endpoint: HttpUrl = Field(default=configs.llm.api_endpoint)
    """
    The OpenAI API endpoint.
    """

    llm_api_key: SecretStr = Field(default=configs.llm.api_key)
    """
    The OpenAI API key.
    """

    llm_timeout: PositiveInt = Field(default=configs.llm.timeout)
    """
    The timeout of the LLM model.
    """

    llm_max_retries: PositiveInt = Field(default=configs.llm.max_retries)
    """
    The maximum number of retries.
    """

    llm_model: str = Field(default=configs.llm.model)
    """
    The LLM model name.
    """

    llm_temperature: NonNegativeFloat = Field(default=configs.llm.temperature)
    """
    The temperature of the LLM model.
    """

    llm_stop_sign: str = Field(default=configs.llm.stop_sign)
    """
    The stop sign of the LLM model.
    """

    llm_top_p: NonNegativeFloat = Field(default=configs.llm.top_p)
    """
    The top p of the LLM model.
    """

    llm_generation_count: PositiveInt = Field(default=configs.llm.generation_count)
    """
    The number of generations to generate.
    """

    llm_stream: bool = Field(default=configs.llm.stream)
    """
    Whether to stream the LLM model's response.
    """

    llm_max_tokens: PositiveInt = Field(default=configs.llm.max_tokens)
    """
    The maximum number of tokens to generate.
    """

    def model_post_init(self, __context: Any) -> None:
        litellm.api_key = self.llm_api_key.get_secret_value()
        litellm.api_base = self.llm_api_endpoint.unicode_string()

    async def aquery(
        self,
        messages: List[Dict[str, str]],
        model: str | None = None,
        temperature: NonNegativeFloat | None = None,
        stop: str | None = None,
        top_p: NonNegativeFloat | None = None,
        max_tokens: PositiveInt | None = None,
        n: PositiveInt | None = None,
        stream: bool | None = None,
        timeout: PositiveInt | None = None,
        max_retries: PositiveInt | None = None,
    ) -> ModelResponse:
        """
        Asynchronously queries the language model to generate a response based on the provided messages and parameters.

        Parameters:
            - messages (List[Dict[str, str]]): A list of messages, where each message is a dictionary containing the role and content of the message.
            - model (str | None): The name of the model to use. If not provided, the default model will be used.
            - temperature (NonNegativeFloat | None): Controls the randomness of the output. Lower values make the output more deterministic.
            - stop (str | None): A sequence at which to stop the generation of the response.
            - top_p (NonNegativeFloat | None): Controls the diversity of the output through nucleus sampling.
            - max_tokens (PositiveInt | None): The maximum number of tokens to generate in the response.
            - n (PositiveInt | None): The number of responses to generate.
            - stream (bool | None): Whether to receive the response in a streaming fashion.
            - timeout (PositiveInt | None): The timeout duration for the request.
            - max_retries (PositiveInt | None): The maximum number of retries in case of failure.

        Returns:
            - ModelResponse: An object containing the generated response and other metadata from the model.
        """
        # Call the underlying asynchronous completion function with the provided and default parameters
        return await litellm.acompletion(
            messages=messages,
            model=model or self.llm_model,
            temperature=temperature or self.llm_temperature,
            stop=stop or self.llm_stop_sign,
            top_p=top_p or self.llm_top_p,
            max_tokens=max_tokens or self.llm_max_tokens,
            n=n or self.llm_generation_count,
            stream=stream or self.llm_stream,
            timeout=timeout or self.llm_timeout,
            max_retries=max_retries or self.llm_max_retries,
        )

    async def aask(
        self,
        question: str,
        system_message: str = "",
        model: str | None = None,
        temperature: NonNegativeFloat | None = None,
        stop: str | None = None,
        top_p: NonNegativeFloat | None = None,
        max_tokens: PositiveInt | None = None,
        n: PositiveInt | None = None,
        stream: bool | None = None,
        timeout: PositiveInt | None = None,
        max_retries: PositiveInt | None = None,
    ) -> List[Choices | StreamingChoices]:
        return (
            await self.aquery(
                messages=Messages().add_system_message(system_message).add_user_message(question),
                model=model,
                temperature=temperature,
                stop=stop,
                top_p=top_p,
                max_tokens=max_tokens,
                n=n,
                stream=stream,
                timeout=timeout,
                max_retries=max_retries,
            )
        ).choices

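A hedged sketch of how the aask helper on LLMUsage might be exercised (not part of the package diff; it requires a configured API key, so the actual run is left commented out):

# Illustrative sketch only: any LLMUsage instance can query the configured model.
import asyncio

from fabricatio.models.generic import LLMUsage


async def main() -> None:
    llm = LLMUsage()  # defaults come from `configs.llm`
    choices = await llm.aask("Say hi in five words.", system_message="Be terse.")
    print(choices[0].message.content)


# asyncio.run(main())  # needs a valid API key, e.g. FABRIK_LLM__API_KEY
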
fabricatio/models/role.py
ADDED
@@ -0,0 +1,14 @@
from typing import List

from pydantic import Field

from fabricatio.models.action import WorkFlow
from fabricatio.models.generic import Memorable, WithToDo, WithBriefing, LLMUsage


class Role[T: WorkFlow](Memorable, WithBriefing, WithToDo, LLMUsage):
    workflows: List[T] = Field(frozen=True)
    """A list of workflows that the role can perform."""

    async def act(self):
        pass

fabricatio/models/tool.py
ADDED
@@ -0,0 +1,80 @@
from inspect import signature, getfullargspec
from typing import Callable, List

from pydantic import Field

from fabricatio.models.generic import WithBriefing


class Tool[**P, R](WithBriefing):
    """A class representing a tool with a callable source function."""
    source: Callable[P, R]

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
        """Invoke the tool's source function with the provided arguments."""
        return self.source(*args, **kwargs)

    def briefing(self) -> str:
        """Return a brief description of the tool.

        Returns:
            str: A brief description of the tool.
        """
        source_signature = str(signature(self.source))
        # Get the return type annotation of the source function
        return_annotation = getfullargspec(self.source).annotations.get('return', 'None')
        return f"{self.name}{source_signature} -> {return_annotation}\n{self.description}"


class ToolBox(WithBriefing):
    """A class representing a collection of tools."""
    tools: List[Tool] = Field(default_factory=list)
    """A list of tools in the toolbox."""

    def collect_tool[**P, R](self, func: Callable[P, R]) -> Callable[P, R]:
        """Add a callable function to the toolbox as a tool.

        Args:
            func (Callable[P, R]): The function to be added as a tool.

        Returns:
            Callable[P, R]: The added function.

        Raises:
            AssertionError: If the provided function is not callable or lacks a name.
        """
        assert callable(func), "The tool must be a callable function."
        assert func.__name__, "The tool must have a name."

        tool = Tool(source=func, name=func.__name__, description=func.__doc__ or "")
        self.tools.append(tool)
        return func

    def briefing(self) -> str:
        """Return a brief description of the toolbox.

        Returns:
            str: A brief description of the toolbox.
        """
        list_out = "\n\n".join([f'- {tool.briefing}' for tool in self.tools])
        toc = (f"## {self.name}: {self.description}\n"
               f"## {len(self.tools)} tools available:\n")
        return f"{toc}\n\n{list_out}"

    def invoke_tool[**P, R](self, name: str, *args: P.args, **kwargs: P.kwargs) -> R:
        """Invoke a tool by name with the provided arguments.

        Args:
            name (str): The name of the tool to invoke.
            *args (P.args): Positional arguments to pass to the tool.
            **kwargs (P.kwargs): Keyword arguments to pass to the tool.

        Returns:
            R: The result of the tool's execution.

        Raises:
            AssertionError: If no tool with the specified name is found.
        """
        tool = next((tool for tool in self.tools if tool.name == name), None)
        assert tool, f"No tool named {name} found."
        return tool(*args, **kwargs)

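A brief sketch of the toolbox in use (not part of the package diff; `add` is an illustrative function):

# Illustrative sketch only: collecting a function as a tool and invoking it by name.
from fabricatio import ToolBox

toolbox = ToolBox(name="math", description="Small math helpers.")

@toolbox.collect_tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

print(toolbox.invoke_tool("add", 1, 2))  # 3
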
fabricatio/models/utils.py
ADDED
@@ -0,0 +1,81 @@
from typing import Literal, Self, List, Dict

from pydantic import BaseModel, Field, ConfigDict


class Message(BaseModel):
    model_config = ConfigDict(use_attribute_docstrings=True)
    role: Literal["user", "system", "assistant"] = Field(default="user")
    """
    Who is sending the message.
    """
    content: str = Field(default="")
    """
    The content of the message.
    """


class Messages(list):
    """
    A list of messages.
    """

    def add_message(self, role: Literal["user", "system", "assistant"], content: str) -> Self:
        """
        Adds a message to the list with the specified role and content.

        Args:
            role (Literal["user", "system", "assistant"]): The role of the message sender.
            content (str): The content of the message.

        Returns:
            Self: The current instance of Messages to allow method chaining.
        """
        if content:
            self.append(Message(role=role, content=content))
        return self

    def add_user_message(self, content: str) -> Self:
        """
        Adds a user message to the list with the specified content.

        Args:
            content (str): The content of the user message.

        Returns:
            Self: The current instance of Messages to allow method chaining.
        """
        return self.add_message("user", content)

    def add_system_message(self, content: str) -> Self:
        """
        Adds a system message to the list with the specified content.

        Args:
            content (str): The content of the system message.

        Returns:
            Self: The current instance of Messages to allow method chaining.
        """
        return self.add_message("system", content)

    def add_assistant_message(self, content: str) -> Self:
        """
        Adds an assistant message to the list with the specified content.

        Args:
            content (str): The content of the assistant message.

        Returns:
            Self: The current instance of Messages to allow method chaining.
        """
        return self.add_message("assistant", content)

    def as_list(self) -> List[Dict[str, str]]:
        """
        Converts the messages to a list of dictionaries.

        Returns:
            List[Dict[str, str]]: A list of dictionaries representing the messages.
        """
        return [message.model_dump() for message in self]

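A short sketch of the chaining API (not part of the package diff):

# Illustrative sketch only: building a message list suitable for aquery/aask.
from fabricatio import Messages

messages = (
    Messages()
    .add_system_message("You are a helpful assistant.")
    .add_user_message("Summarize fabricatio in one sentence.")
)
print(messages.as_list())
# [{'role': 'system', 'content': '...'}, {'role': 'user', 'content': '...'}]
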
fabricatio/py.typed
ADDED
File without changes
fabricatio-0.1.0.dist-info/METADATA
ADDED
@@ -0,0 +1,46 @@
Metadata-Version: 2.4
Name: fabricatio
Version: 0.1.0
Summary: A LLM multi-agent framework.
Author-email: Whth <zettainspector@foxmail.com>
License: MIT License

Copyright (c) 2025 Whth Yotta

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
License-File: LICENSE
Classifier: Framework :: AsyncIO
Classifier: Framework :: Pydantic :: 2
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Typing :: Typed
Requires-Python: >=3.12
Requires-Dist: aiohttp>=3.11.11
Requires-Dist: aiomultiprocess>=0.9.1
Requires-Dist: appdirs>=1.4.4
Requires-Dist: asyncio>=3.4.3
Requires-Dist: litellm>=1.60.0
Requires-Dist: loguru>=0.7.3
Requires-Dist: pydantic-settings>=2.7.1
Requires-Dist: pydantic>=2.10.6
Requires-Dist: pymitter>=1.0.0
Requires-Dist: rich>=13.9.4
Provides-Extra: cli
Requires-Dist: typer>=0.15.1; extra == 'cli'

fabricatio-0.1.0.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
fabricatio/__init__.py,sha256=eyBnyKBsNZJOuHwVoDNCpLNdTj3a_B2raHtHh8dsgSg,420
fabricatio/config.py,sha256=aYvBVyJ3TBA6neDYJroxRKNEGgPA1Q8ncKE1YMH7jKU,3605
fabricatio/core.py,sha256=XVvhpgR4BNDkhWnxTDouw8qyT0v--xKeRjp2BiRMO4E,5194
fabricatio/fs.py,sha256=EBcqgSmsVWAKDm0ceiJ4IYYGPgP_qlO7tNl1O4tYUBk,30
fabricatio/logger.py,sha256=NwfIbSvFnhOrkCREyU_WDC80c3DTJr9NA4OH8oZmrXE,581
fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
fabricatio/models/action.py,sha256=6KAeG5MWnWQzsj_SrQAKkIMduTv1Ry4Bc_v7LRXfjfM,585
fabricatio/models/events.py,sha256=0VtavawE3H60ICOFa1eb-t2Iunl0zhvq0PikzGCE1rw,1978
fabricatio/models/generic.py,sha256=3CUCgXRVx_qKj-jTKh6GJ2jggRPHRwRXlNcOe_c6wlM,10624
fabricatio/models/role.py,sha256=0OZX724IsQwmjdVecJxst0TSo_UcJKI_rlJBEFOgD9o,410
fabricatio/models/tool.py,sha256=_Cj5yG447M5dQNFSu08bMuyVLmA7QZGvtAziyeNqJa0,2871
fabricatio/models/utils.py,sha256=TT4m7CymJXSVc_bqI9k6Inymj3AHbz7bh2tVgw4LW7Y,2491
fabricatio-0.1.0.dist-info/METADATA,sha256=ltTqOsOFVwkaRV8HIvrGuBf21wt9P-EyfmFMSIHEnkw,2082
fabricatio-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
fabricatio-0.1.0.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
fabricatio-0.1.0.dist-info/RECORD,,

fabricatio-0.1.0.dist-info/licenses/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Whth Yotta

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.