fabricatio 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/config.py +6 -2
- fabricatio/models/generic.py +81 -14
- fabricatio/models/role.py +23 -2
- fabricatio/models/task.py +14 -10
- fabricatio/parser.py +5 -2
- {fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/METADATA +2 -1
- {fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/RECORD +9 -9
- {fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/WHEEL +0 -0
- {fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/licenses/LICENSE +0 -0
fabricatio/config.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Literal
+from typing import List, Literal
 
 from appdirs import user_config_dir
 from pydantic import BaseModel, ConfigDict, Field, FilePath, HttpUrl, NonNegativeFloat, PositiveInt, SecretStr
@@ -29,6 +29,7 @@ class LLMConfig(BaseModel):
         stream (bool): Whether to stream the LLM model's response. Default is False.
         max_tokens (PositiveInt): The maximum number of tokens to generate. Set to 8192 as per request.
     """
+
     model_config = ConfigDict(use_attribute_docstrings=True)
     api_endpoint: HttpUrl = Field(default=HttpUrl("https://api.openai.com"))
     """
@@ -60,7 +61,7 @@ class LLMConfig(BaseModel):
     The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request.
     """
 
-    stop_sign: str = Field(default="")
+    stop_sign: str | List[str] = Field(default=("\n\n", "User:"))
     """
     The stop sign of the LLM model. No default stop sign specified.
     """
@@ -94,6 +95,7 @@ class PymitterConfig(BaseModel):
         new_listener_event (bool): If set, a newListener event is emitted when a new listener is added.
         max_listeners (int): The maximum number of listeners per event.
     """
+
     model_config = ConfigDict(use_attribute_docstrings=True)
     delimiter: str = Field(default=".", frozen=True)
     """
@@ -118,6 +120,7 @@ class DebugConfig(BaseModel):
         log_level (Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"]): The log level of the application.
         log_file (FilePath): The log file of the application.
     """
+
     model_config = ConfigDict(use_attribute_docstrings=True)
 
     log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
@@ -139,6 +142,7 @@ class Settings(BaseSettings):
         debug (DebugConfig): Debug Configuration
         pymitter (PymitterConfig): Pymitter Configuration
     """
+
     model_config = SettingsConfigDict(
         env_prefix="FABRIK_",
         env_nested_delimiter="__",
fabricatio/models/generic.py
CHANGED
@@ -1,7 +1,8 @@
 from asyncio import Queue
-from typing import
+from typing import Callable, Dict, Iterable, List, Optional, Self
 
 import litellm
+import orjson
 from litellm.types.utils import Choices, ModelResponse, StreamingChoices
 from pydantic import (
     BaseModel,
@@ -190,7 +191,7 @@ class LLMUsage(Base):
     The temperature of the LLM model.
     """
 
-    llm_stop_sign: Optional[str] = None
+    llm_stop_sign: Optional[str | List[str]] = None
     """
     The stop sign of the LLM model.
     """
@@ -215,21 +216,12 @@ class LLMUsage(Base):
     The maximum number of tokens to generate.
     """
 
-    def model_post_init(self, __context: Any) -> None:
-        """Initialize the LLM model with API key and endpoint.
-
-        Args:
-            __context (Any): The context passed during model initialization.
-        """
-        litellm.api_key = self.llm_api_key.get_secret_value() if self.llm_api_key else configs.llm.api_key
-        litellm.api_base = self.llm_api_endpoint.unicode_string() if self.llm_api_endpoint else configs.llm.api_endpoint
-
     async def aquery(
         self,
         messages: List[Dict[str, str]],
         model: str | None = None,
         temperature: NonNegativeFloat | None = None,
-        stop: str | None = None,
+        stop: str | List[str] | None = None,
         top_p: NonNegativeFloat | None = None,
         max_tokens: PositiveInt | None = None,
         n: PositiveInt | None = None,
@@ -266,6 +258,10 @@ class LLMUsage(Base):
             stream=stream or self.llm_stream or configs.llm.stream,
             timeout=timeout or self.llm_timeout or configs.llm.timeout,
             max_retries=max_retries or self.llm_max_retries or configs.llm.max_retries,
+            api_key=self.llm_api_key.get_secret_value() if self.llm_api_key else configs.llm.api_key.get_secret_value(),
+            base_url=self.llm_api_endpoint.unicode_string()
+            if self.llm_api_endpoint
+            else configs.llm.api_endpoint.unicode_string(),
         )
 
     async def ainvoke(
@@ -274,7 +270,7 @@ class LLMUsage(Base):
         system_message: str = "",
         model: str | None = None,
         temperature: NonNegativeFloat | None = None,
-        stop: str | None = None,
+        stop: str | List[str] | None = None,
         top_p: NonNegativeFloat | None = None,
         max_tokens: PositiveInt | None = None,
         n: PositiveInt | None = None,
@@ -321,7 +317,7 @@ class LLMUsage(Base):
         system_message: str = "",
         model: str | None = None,
         temperature: NonNegativeFloat | None = None,
-        stop: str | None = None,
+        stop: str | List[str] | None = None,
         top_p: NonNegativeFloat | None = None,
         max_tokens: PositiveInt | None = None,
         stream: bool | None = None,
@@ -365,6 +361,61 @@ class LLMUsage(Base):
             .message.content
         )
 
+    async def aask_validate[T](
+        self,
+        question: str,
+        validator: Callable[[str], T | None],
+        max_validations: PositiveInt = 2,
+        system_message: str = "",
+        model: str | None = None,
+        temperature: NonNegativeFloat | None = None,
+        stop: str | List[str] | None = None,
+        top_p: NonNegativeFloat | None = None,
+        max_tokens: PositiveInt | None = None,
+        stream: bool | None = None,
+        timeout: PositiveInt | None = None,
+        max_retries: PositiveInt | None = None,
+    ) -> T:
+        """Asynchronously ask a question and validate the response using a given validator.
+
+        Args:
+            question (str): The question to ask.
+            validator (Callable[[str], T | None]): A function to validate the response.
+            max_validations (PositiveInt): Maximum number of validation attempts.
+            system_message (str): System message to include in the request.
+            model (str | None): The model to use for the request.
+            temperature (NonNegativeFloat | None): Temperature setting for the request.
+            stop (str | None): Stop sequence for the request.
+            top_p (NonNegativeFloat | None): Top-p sampling parameter.
+            max_tokens (PositiveInt | None): Maximum number of tokens in the response.
+            stream (bool | None): Whether to stream the response.
+            timeout (PositiveInt | None): Timeout for the request.
+            max_retries (PositiveInt | None): Maximum number of retries for the request.
+
+        Returns:
+            T: The validated response.
+
+        Raises:
+            ValueError: If the response fails to validate after the maximum number of attempts.
+        """
+        for _ in range(max_validations):
+            if (
+                response := await self.aask(
+                    question,
+                    system_message,
+                    model,
+                    temperature,
+                    stop,
+                    top_p,
+                    max_tokens,
+                    stream,
+                    timeout,
+                    max_retries,
+                )
+            ) and (validated := validator(response)):
+                return validated
+        raise ValueError("Failed to validate the response.")
+
     def fallback_to(self, other: "LLMUsage") -> Self:
         """Fallback to another instance's attribute values if the current instance's attributes are None.
 
@@ -397,3 +448,19 @@ class LLMUsage(Base):
 
         # Return the current instance to allow for method chaining
         return self
+
+
+class WithJsonExample(Base):
+    """Class that provides a JSON schema for the model."""
+
+    @classmethod
+    def json_example(cls) -> str:
+        """Return a JSON example for the model.
+
+        Returns:
+            str: A JSON example for the model.
+        """
+        return orjson.dumps(
+            {field_name: field_info.description for field_name, field_info in cls.model_fields.items()},
+            option=orjson.OPT_INDENT_2 | orjson.OPT_SORT_KEYS,
+        ).decode()
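The headline addition here is aask_validate: a retry loop that re-asks the model until a caller-supplied validator returns a non-None value, raising ValueError once max_validations attempts are exhausted. A minimal usage sketch, assuming agent is any already-configured LLMUsage-derived object (the name is a placeholder):

# `agent` stands in for a configured LLMUsage subclass instance.
def parse_int(response: str) -> int | None:
    # Accept the reply only if it is a bare integer; returning None
    # makes aask_validate retry, up to max_validations attempts.
    try:
        return int(response.strip())
    except ValueError:
        return None

answer: int = await agent.aask_validate(
    "Reply with a single integer: how many sides does a hexagon have?",
    validator=parse_int,
    max_validations=3,
)

One caveat visible in the implementation: the truthiness check on the walrus assignment, "and (validated := validator(response))", treats falsy-but-valid results such as 0 or an empty string as failed attempts, so validators should avoid returning them directly.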
fabricatio/models/role.py
CHANGED
@@ -1,6 +1,6 @@
 from typing import Any
 
-from pydantic import Field
+from pydantic import Field, ValidationError
 
 from fabricatio.core import env
 from fabricatio.journal import logger
@@ -8,6 +8,7 @@ from fabricatio.models.action import WorkFlow
 from fabricatio.models.events import Event
 from fabricatio.models.generic import LLMUsage, Memorable, WithBriefing, WithToDo
 from fabricatio.models.task import Task
+from fabricatio.parser import JsonCapture
 
 
 class Role(Memorable, WithBriefing, WithToDo, LLMUsage):
@@ -26,4 +27,24 @@ class Role(Memorable, WithBriefing, WithToDo, LLMUsage):
             env.on(event, workflow.serve)
 
     async def propose(self, prompt: str) -> Task:
-        """Propose a task
+        """Propose a task based on the provided prompt."""
+        assert prompt, "Prompt must be provided."
+
+        def _validate_json(response: str) -> None | Task:
+            try:
+                cap = JsonCapture.capture(response)
+                logger.debug(f"Response: \n{response}")
+                logger.info(f"Captured JSON: \n{cap[0]}")
+                return Task.model_validate_json(cap[0] if cap else response)
+            except ValidationError as e:
+                logger.error(f"Failed to parse task from JSON: {e}")
+                return None
+
+        return await self.aask_validate(
+            f"{prompt} \n\nBased on requirement above, "
+            f"you need to construct a task to satisfy that requirement in JSON format "
+            f"written like this: \n\n```json\n{Task.json_example()}\n```\n\n"
+            f"No extra explanation needed. ",
+            _validate_json,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+        )
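propose ties the new pieces together: Task.json_example() (from WithJsonExample) seeds the prompt with a field-to-description JSON skeleton, JsonCapture pulls the JSON out of the reply, and aask_validate retries on ValidationError. A hedged end-to-end sketch; the field values are invented, and any further required fields on Role's mixins would need to be supplied as well:

from fabricatio.models.role import Role

# name/description are assumed WithBriefing fields; the values are
# hypothetical and not taken from the package.
role = Role(name="planner", description="Turns user requests into concrete tasks.")

task = await role.propose("Archive every log file older than 30 days.")
print(task.name, task.goal)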
fabricatio/models/task.py
CHANGED
@@ -12,7 +12,7 @@ from pydantic import Field, PrivateAttr
 from fabricatio.config import configs
 from fabricatio.core import env
 from fabricatio.journal import logger
-from fabricatio.models.generic import WithBriefing
+from fabricatio.models.generic import WithBriefing, WithJsonExample
 
 
 class TaskStatus(Enum):
@@ -25,27 +25,31 @@ class TaskStatus(Enum):
     Cancelled = "cancelled"
 
 
-class Task[T](WithBriefing):
+class Task[T](WithBriefing, WithJsonExample):
     """Class that represents a task with a status and output.
 
     Attributes:
         name (str): The name of the task.
         description (str): The description of the task.
         _output (Queue): The output queue of the task.
-
+        _status (TaskStatus): The status of the task.
         goal (str): The goal of the task.
     """
 
     name: str = Field(...)
     """The name of the task."""
+
     description: str = Field(default="")
     """The description of the task."""
-
-    status: TaskStatus = Field(default=TaskStatus.Pending)
-    """The status of the task."""
+
     goal: str = Field(default="")
     """The goal of the task."""
 
+    _output: Queue = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
+    """The output queue of the task."""
+    _status: TaskStatus = PrivateAttr(default=TaskStatus.Pending)
+    """The status of the task."""
+
     @classmethod
     def simple_task(cls, name: str, goal: str, description: str) -> Self:
         """Create a simple task with a name, goal, and description.
@@ -151,7 +155,7 @@ class Task[T](WithBriefing):
             Self: The finished instance of the Task class.
         """
         logger.info(f"Finishing task {self.name}")
-        self.status = TaskStatus.Finished
+        self._status = TaskStatus.Finished
         await self._output.put(output)
         logger.debug(f"Output set for task {self.name}")
         await env.emit_async(self.finished_label, self)
@@ -165,7 +169,7 @@ class Task[T](WithBriefing):
             Self: The running instance of the Task class.
         """
         logger.info(f"Starting task {self.name}")
-        self.status = TaskStatus.Running
+        self._status = TaskStatus.Running
         await env.emit_async(self.running_label, self)
         return self
 
@@ -175,7 +179,7 @@ class Task[T](WithBriefing):
         Returns:
             Self: The cancelled instance of the Task class.
         """
-        self.status = TaskStatus.Cancelled
+        self._status = TaskStatus.Cancelled
         await env.emit_async(self.cancelled_label, self)
         return self
 
@@ -186,7 +190,7 @@ class Task[T](WithBriefing):
             Self: The failed instance of the Task class.
         """
         logger.error(f"Task {self.name} failed")
-        self.status = TaskStatus.Failed
+        self._status = TaskStatus.Failed
         await env.emit_async(self.failed_label, self)
         return self
 
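The breaking change in this file is that status moved from a public pydantic Field to the private _status PrivateAttr, mutated only through the lifecycle coroutines. A sketch of the 0.1.2 flow; the argument values are illustrative:

from fabricatio.models.task import Task

task = Task.simple_task(
    "demo",                           # name
    "illustrate the private status",  # goal
    "a short lifecycle walkthrough",  # description
)
await task.start()         # sets _status to Running, emits running_label
await task.finish("done")  # sets _status to Finished, queues the output
# task.status, which worked in 0.1.1, is gone: the status is private now.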
fabricatio/parser.py
CHANGED
@@ -1,6 +1,7 @@
 from typing import Any, Self, Tuple
 
-
+import regex
+from pydantic import Field, PositiveInt, PrivateAttr
 from regex import Pattern, compile
 
 from fabricatio.models.generic import Base
@@ -18,6 +19,8 @@ class Capture(Base):
     """The target groups to capture from the pattern."""
     pattern: str = Field(frozen=True)
    """The regular expression pattern to search for."""
+    flags: PositiveInt = Field(default=regex.DOTALL | regex.MULTILINE | regex.IGNORECASE, frozen=True)
+    """The flags to use when compiling the regular expression pattern."""
     _compiled: Pattern = PrivateAttr()
 
     def model_post_init(self, __context: Any) -> None:
@@ -26,7 +29,7 @@ class Capture(Base):
         Args:
             __context (Any): The context in which the model is initialized.
         """
-        self._compiled = compile(self.pattern)
+        self._compiled = compile(self.pattern, self.flags)
 
     def capture(self, text: str) -> Tuple[str, ...] | None:
         """Capture the first occurrence of the pattern in the given text.
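Capture now compiles its pattern with DOTALL | MULTILINE | IGNORECASE by default, so a single group can span a multi-line JSON body. A sketch using the JsonCapture instance that role.py imports; the fenced-JSON input shape is an assumption, since JsonCapture's actual pattern is not part of this diff:

from fabricatio.parser import JsonCapture

# Presumed LLM reply; with regex.DOTALL among the default flags, a
# dot-based pattern can match across the newlines inside the fence.
response = 'Sure, here it is:\n```json\n{"name": "demo"}\n```'

captured = JsonCapture.capture(response)  # Tuple[str, ...] | None
if captured:
    print(captured[0])  # the captured JSON text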
{fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fabricatio
-Version: 0.1.1
+Version: 0.1.2
 Summary: A LLM multi-agent framework.
 Author-email: Whth <zettainspector@foxmail.com>
 License: MIT License
@@ -40,6 +40,7 @@ Requires-Dist: asyncio>=3.4.3
 Requires-Dist: gitpython>=3.1.44
 Requires-Dist: litellm>=1.60.0
 Requires-Dist: loguru>=0.7.3
+Requires-Dist: orjson>=3.10.15
 Requires-Dist: pydantic-settings>=2.7.1
 Requires-Dist: pydantic>=2.10.6
 Requires-Dist: pymitter>=1.0.0
{fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/RECORD
CHANGED
@@ -1,19 +1,19 @@
 fabricatio/__init__.py,sha256=nFPtohqceECRYzU-WlVT6o4oSaKN0vGok-w9JIaiJfs,644
-fabricatio/config.py,sha256=
+fabricatio/config.py,sha256=EOlVkuEBAHESAlrGtolGwEG2YrTaJPhEGPKS7QDxrx0,6995
 fabricatio/core.py,sha256=B6KBIfBRF023HF0UUaUprEkQd6sT7G_pexGXQ9btJnE,5788
 fabricatio/journal.py,sha256=CW9HePtgTiboOyPTExq9GjG5BseZcbc-S6lxDXrpmv0,667
-fabricatio/parser.py,sha256=
+fabricatio/parser.py,sha256=On_YUCvOuA0FA_NtDVNJqKp7KEO_sUE89oO_WnkEhQ4,2314
 fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 fabricatio/models/action.py,sha256=M-12dc-nQiNJU6Y9j-dr4Ef3642vRvzHlzxekBepzaU,3358
 fabricatio/models/events.py,sha256=0p42QmNDzmC76DhMwW1H_Mlg15MQ_XjEqkCJc8UkIB8,2055
-fabricatio/models/generic.py,sha256=
-fabricatio/models/role.py,sha256=
-fabricatio/models/task.py,sha256=
+fabricatio/models/generic.py,sha256=Sxpx0BO0t85YF5Lwks6F165N6TJsDe7xym28dQG5Mqs,17681
+fabricatio/models/role.py,sha256=jdabuYRXwgvpYoNwvazygDiZHGGQApUIIKltniu78O8,2151
+fabricatio/models/task.py,sha256=0oQeGQ6Rvd_x6ZM5ImtcN2vr0ojFmF6EiWBAMOjledI,6865
 fabricatio/models/tool.py,sha256=UkEp1Nzbl5wZX21q_Z2VkpiJmVDSdoGDzINQniO8hSY,3536
 fabricatio/models/utils.py,sha256=2mgXla9_K3dnRrz6hIKzmltTYPmvDk0MBjjEBkCXTdg,2474
 fabricatio/toolboxes/__init__.py,sha256=bjefmPd7wBaWhbZzdMPXvrjMTeRzlUh_Dev2PUAc124,158
 fabricatio/toolboxes/task.py,sha256=xgyPetm2R_HlQwpzE8YPnBN7QOYLd0-T8E6QPZG1PPQ,204
-fabricatio-0.1.1.dist-info/METADATA,sha256=
-fabricatio-0.1.1.dist-info/WHEEL,sha256=
-fabricatio-0.1.1.dist-info/licenses/LICENSE,sha256=
-fabricatio-0.1.1.dist-info/RECORD,,
+fabricatio-0.1.2.dist-info/METADATA,sha256=JoHDaag_cV_OOthFdXHGJyBKlJpyUg4C30lBOvAVA_U,3797
+fabricatio-0.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fabricatio-0.1.2.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
+fabricatio-0.1.2.dist-info/RECORD,,
{fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/WHEEL
File without changes

{fabricatio-0.1.1.dist-info → fabricatio-0.1.2.dist-info}/licenses/LICENSE
File without changes