fabricatio 0.2.0.dev14__cp312-cp312-win_amd64.whl → 0.2.0.dev17__cp312-cp312-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fabricatio/_rust.cp312-win_amd64.pyd +0 -0
- fabricatio/_rust.pyi +11 -11
- fabricatio/_rust_instances.py +1 -1
- fabricatio/actions/communication.py +2 -0
- fabricatio/actions/transmission.py +2 -0
- fabricatio/config.py +40 -14
- fabricatio/decorators.py +44 -7
- fabricatio/fs/curd.py +110 -0
- fabricatio/fs/readers.py +2 -0
- fabricatio/models/action.py +5 -4
- fabricatio/models/advanced.py +96 -0
- fabricatio/models/events.py +4 -2
- fabricatio/models/generic.py +1 -421
- fabricatio/models/role.py +3 -2
- fabricatio/models/task.py +2 -29
- fabricatio/models/tool.py +65 -49
- fabricatio/models/usages.py +464 -0
- fabricatio/toolboxes/fs.py +14 -0
- fabricatio/toolboxes/task.py +2 -0
- {fabricatio-0.2.0.dev14.data → fabricatio-0.2.0.dev17.data}/scripts/tdown.exe +0 -0
- {fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/METADATA +6 -1
- fabricatio-0.2.0.dev17.dist-info/RECORD +34 -0
- fabricatio-0.2.0.dev14.dist-info/RECORD +0 -30
- {fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/WHEEL +0 -0
- {fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/licenses/LICENSE +0 -0
fabricatio/models/usages.py
@@ -0,0 +1,464 @@
+"""This module contains classes that manage the usage of language models and tools in tasks."""
+
+from typing import Callable, Dict, Iterable, List, NotRequired, Optional, Self, Set, TypedDict, Union, Unpack
+
+import litellm
+import orjson
+from fabricatio._rust_instances import template_manager
+from fabricatio.config import configs
+from fabricatio.journal import logger
+from fabricatio.models.generic import Base, WithBriefing
+from fabricatio.models.task import Task
+from fabricatio.models.tool import Tool, ToolBox
+from fabricatio.models.utils import Messages
+from fabricatio.parser import JsonCapture
+from litellm.types.utils import Choices, ModelResponse, StreamingChoices
+from pydantic import Field, HttpUrl, NonNegativeFloat, NonNegativeInt, PositiveInt, SecretStr
+
+
+class LLMKwargs(TypedDict):
+    """A type representing the keyword arguments for the LLM (Large Language Model) usage."""
+
+    model: NotRequired[str]
+    temperature: NotRequired[NonNegativeFloat]
+    stop: NotRequired[str | List[str]]
+    top_p: NotRequired[NonNegativeFloat]
+    max_tokens: NotRequired[PositiveInt]
+    stream: NotRequired[bool]
+    timeout: NotRequired[PositiveInt]
+    max_retries: NotRequired[PositiveInt]
+
+
+class LLMUsage(Base):
+    """Class that manages LLM (Large Language Model) usage parameters and methods."""
+
+    llm_api_endpoint: Optional[HttpUrl] = None
+    """The OpenAI API endpoint."""
+
+    llm_api_key: Optional[SecretStr] = None
+    """The OpenAI API key."""
+
+    llm_timeout: Optional[PositiveInt] = None
+    """The timeout of the LLM model."""
+
+    llm_max_retries: Optional[PositiveInt] = None
+    """The maximum number of retries."""
+
+    llm_model: Optional[str] = None
+    """The LLM model name."""
+
+    llm_temperature: Optional[NonNegativeFloat] = None
+    """The temperature of the LLM model."""
+
+    llm_stop_sign: Optional[str | List[str]] = None
+    """The stop sign of the LLM model."""
+
+    llm_top_p: Optional[NonNegativeFloat] = None
+    """The top p of the LLM model."""
+
+    llm_generation_count: Optional[PositiveInt] = None
+    """The number of generations to generate."""
+
+    llm_stream: Optional[bool] = None
+    """Whether to stream the LLM model's response."""
+
+    llm_max_tokens: Optional[PositiveInt] = None
+    """The maximum number of tokens to generate."""
+
+    async def aquery(
+        self,
+        messages: List[Dict[str, str]],
+        n: PositiveInt | None = None,
+        **kwargs: Unpack[LLMKwargs],
+    ) -> ModelResponse:
+        """Asynchronously queries the language model to generate a response based on the provided messages and parameters.
+
+        Args:
+            messages (List[Dict[str, str]]): A list of messages, where each message is a dictionary containing the role and content of the message.
+            n (PositiveInt | None): The number of responses to generate. Defaults to the instance's `llm_generation_count` or the global configuration.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            ModelResponse: An object containing the generated response and other metadata from the model.
+        """
+        # Call the underlying asynchronous completion function with the provided and default parameters
+        return await litellm.acompletion(
+            messages=messages,
+            n=n or self.llm_generation_count or configs.llm.generation_count,
+            model=kwargs.get("model") or self.llm_model or configs.llm.model,
+            temperature=kwargs.get("temperature") or self.llm_temperature or configs.llm.temperature,
+            stop=kwargs.get("stop") or self.llm_stop_sign or configs.llm.stop_sign,
+            top_p=kwargs.get("top_p") or self.llm_top_p or configs.llm.top_p,
+            max_tokens=kwargs.get("max_tokens") or self.llm_max_tokens or configs.llm.max_tokens,
+            stream=kwargs.get("stream") or self.llm_stream or configs.llm.stream,
+            timeout=kwargs.get("timeout") or self.llm_timeout or configs.llm.timeout,
+            max_retries=kwargs.get("max_retries") or self.llm_max_retries or configs.llm.max_retries,
+            api_key=self.llm_api_key.get_secret_value() if self.llm_api_key else configs.llm.api_key.get_secret_value(),
+            base_url=self.llm_api_endpoint.unicode_string()
+            if self.llm_api_endpoint
+            else configs.llm.api_endpoint.unicode_string(),
+        )
+
+    async def ainvoke(
+        self,
+        question: str,
+        system_message: str = "",
+        n: PositiveInt | None = None,
+        **kwargs: Unpack[LLMKwargs],
+    ) -> List[Choices | StreamingChoices]:
+        """Asynchronously invokes the language model with a question and optional system message.
+
+        Args:
+            question (str): The question to ask the model.
+            system_message (str): The system message to provide context to the model. Defaults to an empty string.
+            n (PositiveInt | None): The number of responses to generate. Defaults to the instance's `llm_generation_count` or the global configuration.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            List[Choices | StreamingChoices]: A list of choices or streaming choices from the model response.
+        """
+        return (
+            await self.aquery(
+                messages=Messages().add_system_message(system_message).add_user_message(question),
+                n=n,
+                **kwargs,
+            )
+        ).choices
+
+    async def aask(
+        self,
+        question: str,
+        system_message: str = "",
+        **kwargs: Unpack[LLMKwargs],
+    ) -> str:
+        """Asynchronously asks the language model a question and returns the response content.
+
+        Args:
+            question (str): The question to ask the model.
+            system_message (str): The system message to provide context to the model. Defaults to an empty string.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            str: The content of the model's response message.
+        """
+        return (
+            (
+                await self.ainvoke(
+                    n=1,
+                    question=question,
+                    system_message=system_message,
+                    **kwargs,
+                )
+            )
+            .pop()
+            .message.content
+        )
+
+    async def aask_validate[T](
+        self,
+        question: str,
+        validator: Callable[[str], T | None],
+        max_validations: PositiveInt = 2,
+        system_message: str = "",
+        **kwargs: Unpack[LLMKwargs],
+    ) -> T:
+        """Asynchronously asks a question and validates the response using a given validator.
+
+        Args:
+            question (str): The question to ask.
+            validator (Callable[[str], T | None]): A function to validate the response.
+            max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
+            system_message (str): System message to include in the request. Defaults to an empty string.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            T: The validated response.
+
+        Raises:
+            ValueError: If the response fails to validate after the maximum number of attempts.
+        """
+        for _ in range(max_validations):
+            if (
+                response := await self.aask(
+                    question=question,
+                    system_message=system_message,
+                    **kwargs,
+                )
+            ) and (validated := validator(response)):
+                return validated
+        raise ValueError("Failed to validate the response.")
+
+    async def achoose[T: WithBriefing](
+        self,
+        instruction: str,
+        choices: List[T],
+        k: NonNegativeInt = 0,
+        max_validations: PositiveInt = 2,
+        system_message: str = "",
+        **kwargs: Unpack[LLMKwargs],
+    ) -> List[T]:
+        """Asynchronously executes a multi-choice decision-making process, generating a prompt based on the instruction and options, and validates the returned selection results.
+
+        Args:
+            instruction (str): The user-provided instruction/question description.
+            choices (List[T]): A list of candidate options, requiring elements to have `name` and `briefing` fields.
+            k (NonNegativeInt): The number of choices to select, 0 means infinite. Defaults to 0.
+            max_validations (PositiveInt): Maximum number of validation failures, default is 2.
+            system_message (str): Custom system-level prompt, defaults to an empty string.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            List[T]: The final validated selection result list, with element types matching the input `choices`.
+
+        Important:
+            - Uses a template engine to generate structured prompts.
+            - Ensures response compliance through JSON parsing and format validation.
+            - Relies on `aask_validate` to implement retry mechanisms with validation.
+        """
+        prompt = template_manager.render_template(
+            "make_choice",
+            {
+                "instruction": instruction,
+                "options": [m.model_dump(include={"name", "briefing"}) for m in choices],
+                "k": k,
+            },
+        )
+        names = [c.name for c in choices]
+
+        def _validate(response: str) -> List[T] | None:
+            ret = JsonCapture.convert_with(response, orjson.loads)
+            if not isinstance(ret, List) or len(ret) != k:
+                return None
+            if any(n not in names for n in ret):
+                return None
+            return ret
+
+        return await self.aask_validate(
+            question=prompt,
+            validator=_validate,
+            max_validations=max_validations,
+            system_message=system_message,
+            **kwargs,
+        )
+
+    async def ajudge(
+        self,
+        prompt: str,
+        affirm_case: str = "",
+        deny_case: str = "",
+        max_validations: PositiveInt = 2,
+        system_message: str = "",
+        **kwargs: Unpack[LLMKwargs],
+    ) -> bool:
+        """Asynchronously judges a prompt using AI validation.
+
+        Args:
+            prompt (str): The input prompt to be judged.
+            affirm_case (str): The affirmative case for the AI model. Defaults to an empty string.
+            deny_case (str): The negative case for the AI model. Defaults to an empty string.
+            max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
+            system_message (str): System message for the AI model. Defaults to an empty string.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            bool: The judgment result (True or False) based on the AI's response.
+
+        Notes:
+            The method uses an internal validator to ensure the response is a boolean value.
+            If the response cannot be converted to a boolean, it will return None.
+        """
+
+        def _validate(response: str) -> bool | None:
+            ret = JsonCapture.convert_with(response, orjson.loads)
+            if not isinstance(ret, bool):
+                return None
+            return ret
+
+        return await self.aask_validate(
+            question=template_manager.render_template(
+                "make_judgment", {"prompt": prompt, "affirm_case": affirm_case, "deny_case": deny_case}
+            ),
+            validator=_validate,
+            max_validations=max_validations,
+            system_message=system_message,
+            **kwargs,
+        )
+
+    def fallback_to(self, other: "LLMUsage") -> Self:
+        """Fallback to another instance's attribute values if the current instance's attributes are None.
+
+        Args:
+            other (LLMUsage): Another instance from which to copy attribute values.
+
+        Returns:
+            Self: The current instance, allowing for method chaining.
+        """
+        # Iterate over the attribute names and copy values from 'other' to 'self' where applicable
+        # noinspection PydanticTypeChecker,PyTypeChecker
+        for attr_name in LLMUsage.model_fields:
+            # Copy the attribute value from 'other' to 'self' only if 'self' has None and 'other' has a non-None value
+            if getattr(self, attr_name) is None and (attr := getattr(other, attr_name)) is not None:
+                setattr(self, attr_name, attr)
+
+        # Return the current instance to allow for method chaining
+        return self
+
+    def hold_to(self, others: Union["LLMUsage", Iterable["LLMUsage"]]) -> Self:
+        """Hold to another instance's attribute values if the current instance's attributes are None.
+
+        Args:
+            others (LLMUsage | Iterable[LLMUsage]): Another instance or iterable of instances from which to copy attribute values.
+
+        Returns:
+            Self: The current instance, allowing for method chaining.
+        """
+        for other in others:
+            # noinspection PyTypeChecker,PydanticTypeChecker
+            for attr_name in LLMUsage.model_fields:
+                if (attr := getattr(self, attr_name)) is not None and getattr(other, attr_name) is None:
+                    setattr(other, attr_name, attr)
+
+
+class ChooseKwargs(LLMKwargs):
+    """A type representing the keyword arguments for the choose method."""
+
+    max_validations: NotRequired[PositiveInt]
+    system_message: NotRequired[str]
+    k: NotRequired[NonNegativeInt]
+
+
+class ToolBoxUsage(LLMUsage):
+    """A class representing the usage of tools in a task."""
+
+    toolboxes: Set[ToolBox] = Field(default_factory=set)
+    """A set of toolboxes used by the instance."""
+
+    @property
+    def available_toolbox_names(self) -> List[str]:
+        """Return a list of available toolbox names."""
+        return [toolbox.name for toolbox in self.toolboxes]
+
+    async def choose_toolboxes(
+        self,
+        task: Task,
+        system_message: str = "",
+        k: NonNegativeInt = 0,
+        max_validations: PositiveInt = 2,
+        **kwargs: Unpack[LLMKwargs],
+    ) -> List[ToolBox]:
+        """Asynchronously executes a multi-choice decision-making process to choose toolboxes.
+
+        Args:
+            task (Task): The task for which to choose toolboxes.
+            system_message (str): Custom system-level prompt, defaults to an empty string.
+            k (NonNegativeInt): The number of toolboxes to select, 0 means infinite. Defaults to 0.
+            max_validations (PositiveInt): Maximum number of validation failures, default is 2.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            List[ToolBox]: The selected toolboxes.
+        """
+        if not self.toolboxes:
+            logger.warning("No toolboxes available.")
+            return []
+        return await self.achoose(
+            instruction=task.briefing,
+            choices=list(self.toolboxes),
+            k=k,
+            max_validations=max_validations,
+            system_message=system_message,
+            **kwargs,
+        )
+
+    async def choose_tools(
+        self,
+        task: Task,
+        toolbox: ToolBox,
+        system_message: str = "",
+        k: NonNegativeInt = 0,
+        max_validations: PositiveInt = 2,
+        **kwargs: Unpack[LLMKwargs],
+    ) -> List[Tool]:
+        """Asynchronously executes a multi-choice decision-making process to choose tools.
+
+        Args:
+            task (Task): The task for which to choose tools.
+            toolbox (ToolBox): The toolbox from which to choose tools.
+            system_message (str): Custom system-level prompt, defaults to an empty string.
+            k (NonNegativeInt): The number of tools to select, 0 means infinite. Defaults to 0.
+            max_validations (PositiveInt): Maximum number of validation failures, default is 2.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            List[Tool]: The selected tools.
+        """
+        if not toolbox.tools:
+            logger.warning(f"No tools available in toolbox {toolbox.name}.")
+            return []
+        return await self.achoose(
+            instruction=task.briefing,
+            choices=toolbox.tools,
+            k=k,
+            max_validations=max_validations,
+            system_message=system_message,
+            **kwargs,
+        )
+
+    async def gather_tools(
+        self,
+        task: Task,
+        box_choose_kwargs: Optional[ChooseKwargs] = None,
+        tool_choose_kwargs: Optional[ChooseKwargs] = None,
+    ) -> List[Tool]:
+        """Asynchronously gathers tools based on the provided task and toolbox and tool selection criteria.
+
+        Args:
+            task (Task): The task for which to gather tools.
+            box_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing toolboxes, such as `system_message`, `k`, `max_validations`, `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+            tool_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing tools, such as `system_message`, `k`, `max_validations`, `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+
+        Returns:
+            List[Tool]: A list of tools gathered based on the provided task and toolbox and tool selection criteria.
+        """
+        box_choose_kwargs = box_choose_kwargs or {}
+        tool_choose_kwargs = tool_choose_kwargs or {}
+
+        # Choose the toolboxes
+        chosen_toolboxes = await self.choose_toolboxes(task, **box_choose_kwargs)
+        # Choose the tools
+        chosen_tools = []
+        for toolbox in chosen_toolboxes:
+            chosen_tools.extend(await self.choose_tools(task, toolbox, **tool_choose_kwargs))
+        return chosen_tools
+
+    def supply_tools_from[S: "ToolBoxUsage"](self, others: Union[S, Iterable[S]]) -> Self:
+        """Supplies tools from other ToolUsage instances to this instance.
+
+        Args:
+            others (ToolBoxUsage | Iterable[ToolBoxUsage]): A single ToolUsage instance or an iterable of ToolUsage instances
+                from which to take tools.
+
+        Returns:
+            Self: The current ToolUsage instance with updated tools.
+        """
+        if isinstance(others, ToolBoxUsage):
+            others = [others]
+        for other in others:
+            self.toolboxes.update(other.toolboxes)
+        return self
+
+    def provide_tools_to[S: "ToolBoxUsage"](self, others: Union[S, Iterable[S]]) -> Self:
+        """Provides tools from this instance to other ToolUsage instances.
+
+        Args:
+            others (ToolBoxUsage | Iterable[ToolBoxUsage]): A single ToolUsage instance or an iterable of ToolUsage instances
+                to which to provide tools.
+
+        Returns:
+            Self: The current ToolUsage instance.
+        """
+        if isinstance(others, ToolBoxUsage):
+            others = [others]
+        for other in others:
+            other.toolboxes.update(self.toolboxes)
+        return self
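The central pattern the new `usages.py` module adds is the ask-then-validate retry loop in `LLMUsage.aask_validate`: query the model, run a caller-supplied validator over the raw text, and retry up to `max_validations` times before raising `ValueError`. The sketch below reproduces that control flow outside the package with a stubbed model call; `fake_ask`, `ask_validate`, and `parse_json_list` are hypothetical stand-ins, not fabricatio APIs.

"""Sketch of the ask-then-validate retry loop introduced in usages.py (hypothetical stand-ins only)."""

import asyncio
import json
from typing import Callable, Optional, TypeVar

T = TypeVar("T")


async def fake_ask(question: str) -> str:
    """Hypothetical stand-in for LLMUsage.aask: always returns a canned JSON answer."""
    return '["FsToolBox"]'


async def ask_validate(question: str, validator: Callable[[str], Optional[T]], max_validations: int = 2) -> T:
    """Mirror the control flow of LLMUsage.aask_validate: retry until the validator accepts."""
    for _ in range(max_validations):
        response = await fake_ask(question)
        if response and (validated := validator(response)) is not None:
            return validated
    raise ValueError("Failed to validate the response.")


def parse_json_list(response: str) -> Optional[list]:
    """Hypothetical validator: accept only a JSON list, loosely mirroring achoose's _validate."""
    try:
        ret = json.loads(response)
    except json.JSONDecodeError:
        return None
    return ret if isinstance(ret, list) else None


if __name__ == "__main__":
    print(asyncio.run(ask_validate("Pick a toolbox.", parse_json_list)))  # ['FsToolBox']

The higher-level helpers in the diff (`achoose`, `ajudge`, `choose_toolboxes`, `choose_tools`) all reduce to this loop with different prompts and validators.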
fabricatio/toolboxes/fs.py
@@ -0,0 +1,14 @@
+"""File system tool box."""
+
+from fabricatio.fs.curd import copy_file, create_directory, delete_directory, delete_file, move_file, tree
+from fabricatio.models.tool import ToolBox
+
+fs_toolbox = (
+    ToolBox(name="FsToolBox", description="A toolbox for file system operations.")
+    .add_tool(copy_file)
+    .add_tool(move_file)
+    .add_tool(delete_file)
+    .add_tool(tree)
+    .add_tool(delete_directory)
+    .add_tool(create_directory)
+)
fabricatio/toolboxes/task.py CHANGED
{fabricatio-0.2.0.dev14.data → fabricatio-0.2.0.dev17.data}/scripts/tdown.exe
Binary file
{fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fabricatio
-Version: 0.2.0.dev14
+Version: 0.2.0.dev17
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: 3.12
@@ -22,6 +22,11 @@ Requires-Dist: pymitter>=1.0.0
 Requires-Dist: questionary>=2.1.0
 Requires-Dist: regex>=2024.11.6
 Requires-Dist: rich>=13.9.4
+Requires-Dist: faiss-cpu>=1.10.0 ; extra == 'rag'
+Requires-Dist: pymilvus>=2.5.4 ; extra == 'rag'
+Requires-Dist: fabricatio[rag] ; extra == 'full'
+Provides-Extra: rag
+Provides-Extra: full
 License-File: LICENSE
 Summary: A LLM multi-agent framework.
 Keywords: ai,agents,multi-agent,llm,pyo3
fabricatio-0.2.0.dev17.dist-info/RECORD
@@ -0,0 +1,34 @@
+fabricatio-0.2.0.dev17.dist-info/METADATA,sha256=1I6ycXWgI6ZRXlRrLYSubYm8tSkbfG8dqHYcHgCYIoc,6386
+fabricatio-0.2.0.dev17.dist-info/WHEEL,sha256=tpW5AN9B-9qsM9WW2FXG2r193YXiqexDadpKp0A2daI,96
+fabricatio-0.2.0.dev17.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
+fabricatio/actions/communication.py,sha256=wyc1DfhVwrwadhYG_auI9ykGXM0fwVYNwxpp91sSfiU,484
+fabricatio/actions/transmission.py,sha256=fU-xL8fDG3oRDD9x7Q94OU2Sb9G6xQfrj5IMlMYsgiM,1240
+fabricatio/actions/__init__.py,sha256=eFmFVPQvtNgFynIXBVr3eP-vWQDWCPng60YY5LXvZgg,115
+fabricatio/config.py,sha256=qT_ArV1m5CVF6No-WaUmB-Jgg_tAZ1e50A4_43x5DgE,8802
+fabricatio/core.py,sha256=yQK2ZrbPYDJOaNDp0Bky3muTkB-ZaQ1ld_Qfflm2dY0,5938
+fabricatio/decorators.py,sha256=XCnFmBoQMBB2rrJe58Kg8UP3lAyWwH9cGL7myh4FJNA,2630
+fabricatio/fs/curd.py,sha256=DulsGzat7jcR08WpP8yS1-VcDXEaaBeqnO_IZwn5kOY,3423
+fabricatio/fs/readers.py,sha256=lwAoOGafusBWyTZeN11oXJc224nPZrfafVHMmGSajkY,167
+fabricatio/fs/__init__.py,sha256=lWcKYg0v3mv2LnnSegOQaTtlVDODU0vtw_s6iKU5IqQ,122
+fabricatio/journal.py,sha256=z5K5waad9xmGr1hGrqSgFDRH3wiDQ5Oqfe0o98DaM-k,707
+fabricatio/models/action.py,sha256=YbfCZc5pWzfSsOATMaoPNwQH8uR7AOdjzqo289XVnrE,5301
+fabricatio/models/advanced.py,sha256=tJ93mrQiuIAD5lJMwEcQP1IXY2L2ihUTacyVmVaq9lA,3952
+fabricatio/models/events.py,sha256=p9uXXQNWmDcDvGuiCzeVudMC1cdwB0zSdf5FX8mEBnY,2704
+fabricatio/models/generic.py,sha256=bxH0TidKhDN8n8LOCHjPx3ViIclztHGao1N6YM1IGuQ,3425
+fabricatio/models/role.py,sha256=O0hMOGpSXFLPx2A5XhRis7BtPebG-EgT9tY9P5-8p4Y,1417
+fabricatio/models/task.py,sha256=vVOGwnT-oaIIgjemEa0riMMz_CpWdCasie2NSIRvTKM,8188
+fabricatio/models/tool.py,sha256=pOBjvOsMF6bX9cPy1mnaljzWnWE31rV1gPtTQ0ZPwxc,6389
+fabricatio/models/usages.py,sha256=PHoXqRzu7ItF0Wts4xzgN8Ek_BVTqAHOcQZXxVmU_3s,20567
+fabricatio/models/utils.py,sha256=i_kpcQpct04mQFk1nbcVGV-pl1YThWu4Qk3wbewzKkc,2535
+fabricatio/parser.py,sha256=foEhrO_e-hhRhmABcttwdMyciyJx422MpNqCZOUx8bg,3278
+fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fabricatio/toolboxes/arithmetic.py,sha256=WLqhY-Pikv11Y_0SGajwZx3WhsLNpHKf9drzAqOf_nY,1369
+fabricatio/toolboxes/fs.py,sha256=q5weqzAPv4RddC73blEeYYMTeXoIZjHuK55Us1yVlj0,455
+fabricatio/toolboxes/task.py,sha256=kU4a501awIDV7GwNDuSlK3_Ym-5OhCp5sS-insTmUmQ,269
+fabricatio/toolboxes/__init__.py,sha256=xWObAUPasJjBw0pY9V_XTA9fTavdQkCZSYi7-CTfOqs,415
+fabricatio/_rust.pyi,sha256=rZd_143JcGrLtRF-r5H1rErp8jQxH74n8ItA5FrohT0,1639
+fabricatio/_rust_instances.py,sha256=P5yKVZ9M9CC4ryI1G2rq4mHL96iu6DW_85RMTX8onpA,211
+fabricatio/__init__.py,sha256=C9r6OVyMBb8IqwERNUq8lKDLe4BqN7fiu-O4TsXZ5xU,913
+fabricatio/_rust.cp312-win_amd64.pyd,sha256=BaHm-8-2oT5dASWOXTsJxN17dnc3A2rRTh7RKILO8Cs,1132032
+fabricatio-0.2.0.dev17.data/scripts/tdown.exe,sha256=HsrHzB4dL8S5qCvg98dfRruvwEtwnUkPcmZcdX-efFw,3383808
+fabricatio-0.2.0.dev17.dist-info/RECORD,,
fabricatio-0.2.0.dev14.dist-info/RECORD
@@ -1,30 +0,0 @@
-fabricatio-0.2.0.dev14.dist-info/METADATA,sha256=50vUIUZqVOHUgnmW6aLwpKvDtogRa_luXfgLpNMKcB8,6198
-fabricatio-0.2.0.dev14.dist-info/WHEEL,sha256=tpW5AN9B-9qsM9WW2FXG2r193YXiqexDadpKp0A2daI,96
-fabricatio-0.2.0.dev14.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
-fabricatio/actions/communication.py,sha256=tmsr3H_w-V-b2WxLEyWByGuwSCLgHIHTdHYAgHrdUxc,425
-fabricatio/actions/transmission.py,sha256=PedZ6XsflKdT5ikzaqWr_6h8jci0kekAHfwygzKBUns,1188
-fabricatio/actions/__init__.py,sha256=eFmFVPQvtNgFynIXBVr3eP-vWQDWCPng60YY5LXvZgg,115
-fabricatio/config.py,sha256=wArRP1n3QIRwGjZOgAezdYwMYqhsrxz3D_biXAZjB28,8057
-fabricatio/core.py,sha256=yQK2ZrbPYDJOaNDp0Bky3muTkB-ZaQ1ld_Qfflm2dY0,5938
-fabricatio/decorators.py,sha256=0b8UeW6V1X6EYRgnvFlWymYOSc4nZBQNVly64JucKeY,1204
-fabricatio/fs/readers.py,sha256=mw0VUH3P7Wk0SMlcQm2yOfjEz5C3mQ_kjduAjecaxgY,123
-fabricatio/fs/__init__.py,sha256=lWcKYg0v3mv2LnnSegOQaTtlVDODU0vtw_s6iKU5IqQ,122
-fabricatio/journal.py,sha256=z5K5waad9xmGr1hGrqSgFDRH3wiDQ5Oqfe0o98DaM-k,707
-fabricatio/models/action.py,sha256=u_y89TDnP8G3g4rDuR72_a-6Ewe7_DWSywzcKeDr3vw,5280
-fabricatio/models/events.py,sha256=DDdcexweKV7jmPLHx51PIQ6eIByRrFyAMyx2VMhl9JY,2650
-fabricatio/models/generic.py,sha256=s2xz8yOlfSMnD2R9iRIe2xl12JNJXILBDfFZWzhl-xc,22932
-fabricatio/models/role.py,sha256=9DorKR1zqIn4z1HrBJIh9P8Igr-cuzh1vwcSDswfTHA,1376
-fabricatio/models/task.py,sha256=rw4RCvziUDRixE5N5miZolhYl6mvfV9dGgDxrFbJZS0,9345
-fabricatio/models/tool.py,sha256=MXjYLCsGfpjJ74oorm1OAMMFwVbP5SO09CTbxWERRjc,5504
-fabricatio/models/utils.py,sha256=i_kpcQpct04mQFk1nbcVGV-pl1YThWu4Qk3wbewzKkc,2535
-fabricatio/parser.py,sha256=foEhrO_e-hhRhmABcttwdMyciyJx422MpNqCZOUx8bg,3278
-fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-fabricatio/toolboxes/arithmetic.py,sha256=WLqhY-Pikv11Y_0SGajwZx3WhsLNpHKf9drzAqOf_nY,1369
-fabricatio/toolboxes/task.py,sha256=WsMB5fSfftl975pTRXLEcnItGrmwdQfVX0Y24UyIsxs,205
-fabricatio/toolboxes/__init__.py,sha256=xWObAUPasJjBw0pY9V_XTA9fTavdQkCZSYi7-CTfOqs,415
-fabricatio/_rust.pyi,sha256=IHNv9SHdjve24PBWhdRGCqWYdo2tSAkxYR9CddHhzX8,1540
-fabricatio/_rust_instances.py,sha256=PJC8TAkcH9f1Ak-0CKxSom_HTFHQEXZ8-aSDrYw7dwI,157
-fabricatio/__init__.py,sha256=C9r6OVyMBb8IqwERNUq8lKDLe4BqN7fiu-O4TsXZ5xU,913
-fabricatio/_rust.cp312-win_amd64.pyd,sha256=wDtiezTlOmzEW_nku7u68uLZW_bePrRznTQ10SNmgcM,1125888
-fabricatio-0.2.0.dev14.data/scripts/tdown.exe,sha256=E4VwoZp3dpyJt1t4ly-4Oi0RUT6wlqlbqE4VvKVKnQg,3383296
-fabricatio-0.2.0.dev14.dist-info/RECORD,,
{fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/WHEEL
File without changes
{fabricatio-0.2.0.dev14.dist-info → fabricatio-0.2.0.dev17.dist-info}/licenses/LICENSE
File without changes