fabricatio 0.2.1.dev3__cp312-cp312-win_amd64.whl → 0.2.2__cp312-cp312-win_amd64.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- fabricatio/_rust.cp312-win_amd64.pyd +0 -0
- fabricatio/capabilities/rating.py +355 -0
- fabricatio/capabilities/task.py +131 -0
- fabricatio/config.py +11 -2
- fabricatio/models/action.py +2 -1
- fabricatio/models/kwargs_types.py +7 -2
- fabricatio/models/role.py +2 -1
- fabricatio/models/task.py +5 -5
- fabricatio/models/usages.py +41 -45
- {fabricatio-0.2.1.dev3.data → fabricatio-0.2.2.data}/scripts/tdown.exe +0 -0
- {fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/METADATA +2 -2
- {fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/RECORD +14 -13
- fabricatio/models/advanced.py +0 -289
- {fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/WHEEL +0 -0
- {fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/licenses/LICENSE +0 -0
fabricatio/_rust.cp312-win_amd64.pyd
Binary file
fabricatio/capabilities/rating.py
ADDED
@@ -0,0 +1,355 @@
+"""A module that provides functionality to rate tasks based on a rating manual and score range."""
+
+from asyncio import gather
+from itertools import permutations
+from typing import Dict, List, Set, Tuple, Union, Unpack, overload
+
+import orjson
+from fabricatio._rust_instances import template_manager
+from fabricatio.config import configs
+from fabricatio.journal import logger
+from fabricatio.models.generic import WithBriefing
+from fabricatio.models.kwargs_types import GenerateKwargs, ValidateKwargs
+from fabricatio.models.usages import LLMUsage
+from fabricatio.parser import JsonCapture
+from more_itertools import flatten, windowed
+from pydantic import NonNegativeInt, PositiveInt
+
+
+class GiveRating(WithBriefing, LLMUsage):
+    """A class that provides functionality to rate tasks based on a rating manual and score range."""
+
+    async def rate_fine_grind(
+        self,
+        to_rate: str,
+        rating_manual: Dict[str, str],
+        score_range: Tuple[float, float],
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Dict[str, float]:
+        """Rate a given string based on a rating manual and score range.
+
+        Args:
+            to_rate (str): The string to be rated.
+            rating_manual (Dict[str, str]): A dictionary containing the rating criteria.
+            score_range (Tuple[float, float]): A tuple representing the valid score range.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            Dict[str, float]: A dictionary with the ratings for each dimension.
+        """
+
+        def _validator(response: str) -> Dict[str, float] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, dict)
+                and json_data.keys() == rating_manual.keys()
+                and all(isinstance(v, float) for v in json_data.values())
+                and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
+            ):
+                return json_data
+            return None
+
+        logger.info(f"Rating for {to_rate}")
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.rate_fine_grind_template,
+                    {
+                        "to_rate": to_rate,
+                        "min_score": score_range[0],
+                        "max_score": score_range[1],
+                        "rating_manual": rating_manual,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    @overload
+    async def rate(
+        self,
+        to_rate: str,
+        topic: str,
+        criteria: Set[str],
+        score_range: Tuple[float, float] = (0.0, 1.0),
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Dict[str, float]: ...
+
+    @overload
+    async def rate(
+        self,
+        to_rate: List[str],
+        topic: str,
+        criteria: Set[str],
+        score_range: Tuple[float, float] = (0.0, 1.0),
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> List[Dict[str, float]]: ...
+
+    async def rate(
+        self,
+        to_rate: Union[str, List[str]],
+        topic: str,
+        criteria: Set[str],
+        score_range: Tuple[float, float] = (0.0, 1.0),
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Union[Dict[str, float], List[Dict[str, float]]]:
+        """Rate a given string or a sequence of strings based on a topic, criteria, and score range.
+
+        Args:
+            to_rate (Union[str, List[str]]): The string or sequence of strings to be rated.
+            topic (str): The topic related to the task.
+            criteria (Set[str]): A set of criteria for rating.
+            score_range (Tuple[float, float], optional): A tuple representing the valid score range. Defaults to (0.0, 1.0).
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            Union[Dict[str, float], List[Dict[str, float]]]: A dictionary with the ratings for each criterion if a single string is provided,
+                or a list of dictionaries with the ratings for each criterion if a sequence of strings is provided.
+        """
+        manual = await self.draft_rating_manual(topic, criteria, **kwargs)
+        if isinstance(to_rate, str):
+            return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
+        if isinstance(to_rate, list):
+            return await gather(*[self.rate_fine_grind(item, manual, score_range, **kwargs) for item in to_rate])
+        raise ValueError("to_rate must be a string or a list of strings")
+
+    async def draft_rating_manual(
+        self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs]
+    ) -> Dict[str, str]:
+        """Drafts a rating manual based on a topic and dimensions.
+
+        Args:
+            topic (str): The topic for the rating manual.
+            criteria (Set[str]): A set of dimensions for the rating manual.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            Dict[str, str]: A dictionary representing the drafted rating manual.
+        """
+
+        def _validator(response: str) -> Dict[str, str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, dict)
+                and json_data.keys() == criteria
+                and all(isinstance(v, str) for v in json_data.values())
+            ):
+                return json_data
+            return None
+
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.draft_rating_manual_template,
+                    {
+                        "topic": topic,
+                        "criteria": criteria,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def draft_rating_criteria(
+        self,
+        topic: str,
+        criteria_count: NonNegativeInt = 0,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Set[str]:
+        """Drafts rating dimensions based on a topic.
+
+        Args:
+            topic (str): The topic for the rating dimensions.
+            criteria_count (NonNegativeInt, optional): The number of dimensions to draft, 0 means no limit. Defaults to 0.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            Set[str]: A set of rating dimensions.
+        """
+
+        def _validator(response: str) -> Set[str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, list)
+                and all(isinstance(v, str) for v in json_data)
+                and (criteria_count == 0 or len(json_data) == criteria_count)
+            ):
+                return set(json_data)
+            return None
+
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.draft_rating_criteria_template,
+                    {
+                        "topic": topic,
+                        "criteria_count": criteria_count,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def draft_rating_criteria_from_examples(
+        self,
+        topic: str,
+        examples: List[str],
+        reasons_count: PositiveInt = 2,
+        criteria_count: PositiveInt = 5,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Set[str]:
+        """Asynchronously drafts a set of rating criteria based on provided examples.
+
+        This function generates rating criteria by analyzing examples and extracting reasons for comparison,
+        then further condensing these reasons into a specified number of criteria.
+
+        Parameters:
+            topic (str): The subject topic for the rating criteria.
+            examples (List[str]): A list of example texts to analyze.
+            reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
+            criteria_count (PositiveInt, optional): The final number of rating criteria to draft. Defaults to 5.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for validation.
+
+        Returns:
+            Set[str]: A set of drafted rating criteria.
+        """
+
+        def _reasons_validator(response: str) -> List[str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, list)
+                and all(isinstance(v, str) for v in json_data)
+                and len(json_data) == reasons_count
+            ):
+                return json_data
+            return None
+
+        def _criteria_validator(response: str) -> Set[str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, list)
+                and all(isinstance(v, str) for v in json_data)
+                and len(json_data) == criteria_count
+            ):
+                return set(json_data)
+            return None
+
+        kwargs = GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
+        # extract reasons from the comparison of ordered pairs of extracted from examples
+        reasons = flatten(
+            await self.aask_validate_batch(
+                questions=[
+                    template_manager.render_template(
+                        configs.templates.extract_reasons_from_examples_template,
+                        {
+                            "topic": topic,
+                            "first": pair[0],
+                            "second": pair[1],
+                            "reasons_count": reasons_count,
+                        },
+                    )
+                    for pair in (permutations(examples, 2))
+                ],
+                validator=_reasons_validator,
+                **kwargs,
+            )
+        )
+        # extract certain mount of criteria from reasons according to their importance and frequency
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.extract_criteria_from_reasons_template,
+                    {
+                        "topic": topic,
+                        "reasons": list(reasons),
+                        "criteria_count": criteria_count,
+                    },
+                )
+            ),
+            validator=_criteria_validator,
+            **kwargs,
+        )
+
+    async def drafting_rating_weights_klee(
+        self,
+        topic: str,
+        criteria: Set[str],
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Dict[str, float]:
+        """Drafts rating weights for a given topic and criteria using the Klee method.
+
+        Args:
+            topic (str): The topic for the rating weights.
+            criteria (Set[str]): A set of criteria for the rating weights.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            Dict[str, float]: A dictionary representing the drafted rating weights for each criterion.
+        """
+        if len(criteria) < 2:  # noqa: PLR2004
+            raise ValueError("At least two criteria are required to draft rating weights")
+
+        def _validator(resp: str) -> float | None:
+            if (cap := JsonCapture.convert_with(resp, orjson.loads)) is not None and isinstance(cap, float):
+                return cap
+            return None
+
+        criteria = list(criteria)  # freeze the order
+        windows = windowed(criteria, 2)
+
+        # get the importance multiplier indicating how important is second criterion compared to the first one
+        relative_weights = await self.aask_validate_batch(
+            questions=[
+                template_manager.render_template(
+                    configs.templates.draft_rating_weights_klee_template,
+                    {
+                        "topic": topic,
+                        "first": pair[0],
+                        "second": pair[1],
+                    },
+                )
+                for pair in windows
+            ],
+            validator=_validator,
+            **GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs),
+        )
+        weights = [1]
+        for rw in relative_weights:
+            weights.append(weights[-1] * rw)
+        total = sum(weights)
+        return dict(zip(criteria, [w / total for w in weights], strict=True))
+
+    async def composite_score(
+        self,
+        topic: str,
+        to_rate: List[str],
+        reasons_count: PositiveInt = 2,
+        criteria_count: PositiveInt = 5,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> List[float]:
+        """Calculates the composite scores for a list of items based on a given topic and criteria.
+
+        Args:
+            topic (str): The topic for the rating.
+            to_rate (List[str]): A list of strings to be rated.
+            reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
+            criteria_count (PositiveInt, optional): The number of criteria to draft. Defaults to 5.
+            **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            List[float]: A list of composite scores for the items.
+        """
+        criteria = await self.draft_rating_criteria_from_examples(
+            topic, to_rate, reasons_count, criteria_count, **kwargs
+        )
+        weights = await self.drafting_rating_weights_klee(topic, criteria, **kwargs)
+        logger.info(f"Criteria: {criteria}\nWeights: {weights}")
+        ratings_seq = await self.rate(to_rate, topic, criteria, **kwargs)
+
+        return [sum(ratings[c] * weights[c] for c in criteria) for ratings in ratings_seq]
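composite_score chains everything above: criteria are drafted from pairwise comparisons of the items, Klee weights are derived, each item is rated per criterion, and the ratings reduce to one weighted score per item. Below is a hedged usage sketch; `rater` stands for any object mixing in GiveRating (for example a configured Role), and every literal value is invented for illustration.

import asyncio

async def demo(rater) -> None:
    # Drives draft_rating_criteria_from_examples -> drafting_rating_weights_klee
    # -> rate -> weighted sum, i.e. the chain composite_score performs above.
    scores = await rater.composite_score(
        topic="API docstring quality",
        to_rate=["docstring A ...", "docstring B ...", "docstring C ..."],
    )
    print(scores)  # e.g. [0.62, 0.48, 0.71] -- illustrative numbers

# asyncio.run(demo(rater)) once a rater instance exists.
# The Klee step normalizes a chain of relative weights: if the LLM judges
# criterion 2 to be 2.0x as important as criterion 1, and criterion 3 to be
# 0.5x as important as criterion 2, the raw chain is [1, 2.0, 1.0]; dividing
# by the sum 4.0 gives weights {c1: 0.25, c2: 0.50, c3: 0.25}.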
fabricatio/capabilities/task.py
ADDED
@@ -0,0 +1,131 @@
+"""A module for the task capabilities of the Fabricatio library."""
+
+from types import CodeType
+from typing import Any, Dict, List, Optional, Tuple, Unpack
+
+import orjson
+from fabricatio._rust_instances import template_manager
+from fabricatio.config import configs
+from fabricatio.models.generic import WithBriefing
+from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
+from fabricatio.models.task import Task
+from fabricatio.models.tool import Tool, ToolExecutor
+from fabricatio.models.usages import LLMUsage, ToolBoxUsage
+from fabricatio.parser import JsonCapture, PythonCapture
+from loguru import logger
+from pydantic import ValidationError
+
+
+class ProposeTask(WithBriefing, LLMUsage):
+    """A class that proposes a task based on a prompt."""
+
+    async def propose[T](
+        self,
+        prompt: str,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Task[T]:
+        """Asynchronously proposes a task based on a given prompt and parameters.
+
+        Parameters:
+            prompt: The prompt text for proposing a task, which is a string that must be provided.
+            **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
+
+        Returns:
+            A Task object based on the proposal result.
+        """
+        if not prompt:
+            err = f"{self.name}: Prompt must be provided."
+            logger.error(err)
+            raise ValueError(err)
+
+        def _validate_json(response: str) -> None | Task:
+            try:
+                cap = JsonCapture.capture(response)
+                logger.debug(f"Response: \n{response}")
+                logger.info(f"Captured JSON: \n{cap}")
+                return Task.model_validate_json(cap)
+            except ValidationError as e:
+                logger.error(f"Failed to parse task from JSON: {e}")
+                return None
+
+        template_data = {"prompt": prompt, "json_example": Task.json_example()}
+        return await self.aask_validate(
+            question=template_manager.render_template(configs.templates.propose_task_template, template_data),
+            validator=_validate_json,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+
+class HandleTask(WithBriefing, ToolBoxUsage):
+    """A class that handles a task based on a task object."""
+
+    async def draft_tool_usage_code(
+        self,
+        task: Task,
+        tools: List[Tool],
+        data: Dict[str, Any],
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Tuple[CodeType, List[str]]:
+        """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
+        logger.info(f"Drafting tool usage code for task: {task.briefing}")
+
+        if not tools:
+            err = f"{self.name}: Tools must be provided to draft the tool usage code."
+            logger.error(err)
+            raise ValueError(err)
+
+        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
+            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
+                to_extract := JsonCapture.convert_with(response, orjson.loads)
+            ):
+                return source, to_extract
+
+            return None
+
+        q = template_manager.render_template(
+            configs.templates.draft_tool_usage_code_template,
+            {
+                "data_module_name": configs.toolbox.data_module_name,
+                "tool_module_name": configs.toolbox.tool_module_name,
+                "task": task.briefing,
+                "deps": task.dependencies_prompt,
+                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
+                "data": data,
+            },
+        )
+        logger.debug(f"Code Drafting Question: \n{q}")
+        return await self.aask_validate(
+            question=q,
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def handle_fin_grind(
+        self,
+        task: Task,
+        data: Dict[str, Any],
+        box_choose_kwargs: Optional[ChooseKwargs] = None,
+        tool_choose_kwargs: Optional[ChooseKwargs] = None,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Optional[Tuple]:
+        """Asynchronously handles a task based on a given task object and parameters."""
+        logger.info(f"Handling task: \n{task.briefing}")
+
+        tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
+        logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
+
+        if tools:
+            executor = ToolExecutor(candidates=tools, data=data)
+            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
+
+            cxt = executor.execute(code)
+            if to_extract:
+                return tuple(cxt.get(k) for k in to_extract)
+
+        return None
+
+    async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
+        """Asynchronously handles a task based on a given task object and parameters."""
+        return await self.handle_fin_grind(task, data, **kwargs)
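A hedged sketch of driving the two capability classes; `role` is assumed to be an object mixing in both ProposeTask and HandleTask (as Role does after this change), and the prompt and data are illustrative.

async def demo(role) -> None:
    # propose: the LLM drafts a Task as JSON, which is re-validated through
    # Task.model_validate_json until it parses or retries run out.
    task = await role.propose("Read ./README.md and summarize it in three bullets")

    # handle: toolboxes and tools are gathered for the task, the LLM drafts
    # Python source plus a JSON list of variable names, the code is executed,
    # and those variables are pulled out of the execution context as a tuple.
    result = await role.handle(task, data={})
    print(task.name, result)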
fabricatio/config.py
CHANGED
@@ -163,8 +163,17 @@ class TemplateConfig(BaseModel):
     draft_rating_manual_template: str = Field(default="draft_rating_manual")
     """The name of the draft rating manual template which will be used to draft rating manual."""

-    draft_rating_dimensions_template: str = Field(default="draft_rating_dimensions")
-    """The name of the draft rating dimensions template which will be used to draft rating dimensions."""
+    draft_rating_criteria_template: str = Field(default="draft_rating_criteria")
+    """The name of the draft rating criteria template which will be used to draft rating criteria."""
+
+    extract_reasons_from_examples_template: str = Field(default="extract_reasons_from_examples")
+    """The name of the extract reasons from examples template which will be used to extract reasons from examples."""
+
+    extract_criteria_from_reasons_template: str = Field(default="extract_criteria_from_reasons")
+    """The name of the extract criteria from reasons template which will be used to extract criteria from reasons."""
+
+    draft_rating_weights_klee_template: str = Field(default="draft_rating_weights_klee")
+    """The name of the draft rating weights klee template which will be used to draft rating weights with Klee method."""


 class MagikaConfig(BaseModel):
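These fields only name templates that template_manager resolves at render time, so swapping one is a plain pydantic field override. A minimal sketch, assuming TemplateConfig is constructed directly rather than through the library's global `configs` settings object:

from pydantic import BaseModel, Field

class TemplateConfig(BaseModel):  # abridged stand-in, not the full class
    draft_rating_criteria_template: str = Field(default="draft_rating_criteria")

templates = TemplateConfig(draft_rating_criteria_template="my_criteria")
assert templates.draft_rating_criteria_template == "my_criteria"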
fabricatio/models/action.py
CHANGED
@@ -5,8 +5,9 @@ from abc import abstractmethod
 from asyncio import Queue
 from typing import Any, Dict, Self, Tuple, Type, Union, Unpack

+from fabricatio.capabilities.rating import GiveRating
+from fabricatio.capabilities.task import HandleTask, ProposeTask
 from fabricatio.journal import logger
-from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask
 from fabricatio.models.generic import WithBriefing
 from fabricatio.models.task import Task
 from fabricatio.models.usages import ToolBoxUsage
fabricatio/models/kwargs_types.py
CHANGED
@@ -24,8 +24,13 @@ class ValidateKwargs(LLMKwargs):
     max_validations: NotRequired[PositiveInt]


-class ChooseKwargs(ValidateKwargs):
-    """A type representing the keyword arguments for the choose method."""
+class GenerateKwargs(ValidateKwargs):
+    """A type representing the keyword arguments for the generate method."""

     system_message: NotRequired[str]
+
+
+class ChooseKwargs(GenerateKwargs):
+    """A type representing the keyword arguments for the choose method."""
+
     k: NotRequired[NonNegativeInt]
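The net effect is a TypedDict chain (LLMKwargs → ValidateKwargs → GenerateKwargs → ChooseKwargs), so one **kwargs dict can flow from choose-style calls down to the raw completion call. A self-contained sketch of the same pattern, with field names abridged rather than the library's full definitions:

from typing import NotRequired, TypedDict, Unpack

class LLMKwargs(TypedDict):
    temperature: NotRequired[float]

class ValidateKwargs(LLMKwargs):
    max_validations: NotRequired[int]

class GenerateKwargs(ValidateKwargs):
    system_message: NotRequired[str]

class ChooseKwargs(GenerateKwargs):
    k: NotRequired[int]

def achoose(k: int = 0, **kwargs: Unpack[GenerateKwargs]) -> None:
    # After this release the system message rides inside kwargs instead of
    # being a separate parameter on every method.
    print(k, kwargs.get("system_message", ""))

achoose(k=1, system_message="# briefing", max_validations=3, temperature=0.2)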
fabricatio/models/role.py
CHANGED
@@ -2,10 +2,11 @@

 from typing import Any, Self, Set

+from fabricatio.capabilities.rating import GiveRating
+from fabricatio.capabilities.task import HandleTask, ProposeTask
 from fabricatio.core import env
 from fabricatio.journal import logger
 from fabricatio.models.action import WorkFlow
-from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask
 from fabricatio.models.events import Event
 from fabricatio.models.tool import ToolBox
 from pydantic import Field
fabricatio/models/task.py
CHANGED
@@ -46,19 +46,19 @@ class Task[T](WithBriefing, WithJsonExample, WithDependency):
     """

     name: str = Field(...)
-    """The name of the task."""
+    """The name of the task, which should be a concise and descriptive name."""

     description: str = Field(default="")
-    """The description of the task."""
+    """The description of the task, which should provide every details and noting about the task if provided, obeying the CEFR level rule and 5W1H rule."""

     goal: List[str] = Field(default=[])
-    """The goal of the task, a list of strings."""
+    """The goal of the task, a list of strings. The goal should be a concise and clear statement of what the task is intended to achieve, goal SHALL NOT be too broad or too narrow."""

     namespace: List[str] = Field(default_factory=list)
-    """The namespace of the task, a list of namespace segment, as string."""
+    """The namespace of the task, a list of namespace segment, as string, if it is not directly given out, it SHALL just be a empty list meaning `NOT ASSIGNED`"""

     dependencies: List[str] = Field(default_factory=list)
-    """A list of file paths, These file are needed to read or write to meet a specific requirement of this task."""
+    """A list of file paths, These file are needed to read or write to meet a specific requirement of this task, if it is not directly given out, it SHALL just be a empty list meaning `NOT ASSIGNED`"""

     _output: Queue = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
     """The output queue of the task."""
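These docstrings are what the LLM sees when asked to emit a Task, so they now spell out the expected shape. An illustrative payload consistent with them, with all values invented:

task_json = """{
  "name": "Summarize README",
  "description": "Read the project README and produce a three-bullet summary covering install, usage, and license.",
  "goal": ["Produce exactly three bullet points"],
  "namespace": [],
  "dependencies": ["./README.md"]
}"""
# namespace and dependencies stay [] when not explicitly given,
# which the docstrings define as `NOT ASSIGNED`.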
fabricatio/models/usages.py
CHANGED
@@ -10,7 +10,7 @@ from fabricatio._rust_instances import template_manager
 from fabricatio.config import configs
 from fabricatio.journal import logger
 from fabricatio.models.generic import Base, WithBriefing
-from fabricatio.models.kwargs_types import ChooseKwargs, LLMKwargs
+from fabricatio.models.kwargs_types import ChooseKwargs, GenerateKwargs, LLMKwargs
 from fabricatio.models.task import Task
 from fabricatio.models.tool import Tool, ToolBox
 from fabricatio.models.utils import Messages
@@ -72,10 +72,10 @@ class LLMUsage(Base):
         Args:
             messages (List[Dict[str, str]]): A list of messages, where each message is a dictionary containing the role and content of the message.
             n (PositiveInt | None): The number of responses to generate. Defaults to the instance's `llm_generation_count` or the global configuration.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
-            ModelResponse: An object containing the generated response and other metadata from the model.
+            ModelResponse | CustomStreamWrapper: An object containing the generated response and other metadata from the model.
         """
         # Call the underlying asynchronous completion function with the provided and default parameters
         return await litellm.acompletion(
@@ -108,7 +108,7 @@ class LLMUsage(Base):
             question (str): The question to ask the model.
             system_message (str): The system message to provide context to the model. Defaults to an empty string.
             n (PositiveInt | None): The number of responses to generate. Defaults to the instance's `llm_generation_count` or the global configuration.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             List[Choices | StreamingChoices]: A list of choices or streaming choices from the model response.
@@ -170,12 +170,12 @@ class LLMUsage(Base):
         """Asynchronously asks the language model a question and returns the response content.

         Args:
-            question (str): The question to ask the model.
-            system_message (str): The system message to provide context to the model. Defaults to an empty string.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            question (str | List[str]): The question to ask the model.
+            system_message (str | List[str] | None): The system message to provide context to the model. Defaults to an empty string.
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
-            str: The content of the model's response message.
+            str | List[str]: The content of the model's response message.
         """
         system_message = system_message or ""
         match (isinstance(question, list), isinstance(system_message, list)):
@@ -226,7 +226,7 @@ class LLMUsage(Base):
             validator (Callable[[str], T | None]): A function to validate the response.
             max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
             system_message (str): System message to include in the request. Defaults to an empty string.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             T: The validated response.
@@ -248,14 +248,33 @@
         logger.error(f"Failed to validate the response after {max_validations} attempts.")
         raise ValueError("Failed to validate the response.")

+    async def aask_validate_batch[T](
+        self,
+        questions: List[str],
+        validator: Callable[[str], T | None],
+        **kwargs: Unpack[GenerateKwargs],
+    ) -> List[T]:
+        """Asynchronously asks a batch of questions and validates the responses using a given validator.
+
+        Args:
+            questions (List[str]): The list of questions to ask.
+            validator (Callable[[str], T | None]): A function to validate the response.
+            **kwargs (Unpack[GenerateKwargs]): Additional keyword arguments for the LLM usage.
+
+        Returns:
+            T: The validated response.
+
+        Raises:
+            ValueError: If the response fails to validate after the maximum number of attempts.
+        """
+        return await gather(*[self.aask_validate(question, validator, **kwargs) for question in questions])
+
     async def achoose[T: WithBriefing](
         self,
         instruction: str,
         choices: List[T],
         k: NonNegativeInt = 0,
-        max_validations: PositiveInt = 2,
-        system_message: str = "",
-        **kwargs: Unpack[LLMKwargs],
+        **kwargs: Unpack[GenerateKwargs],
     ) -> List[T]:
         """Asynchronously executes a multi-choice decision-making process, generating a prompt based on the instruction and options, and validates the returned selection results.

@@ -263,9 +282,7 @@
             instruction (str): The user-provided instruction/question description.
             choices (List[T]): A list of candidate options, requiring elements to have `name` and `briefing` fields.
             k (NonNegativeInt): The number of choices to select, 0 means infinite. Defaults to 0.
-            max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
-            system_message (str): Custom system-level prompt, defaults to an empty string.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+            **kwargs (Unpack[GenerateKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             List[T]: The final validated selection result list, with element types matching the input `choices`.
@@ -301,8 +318,6 @@
         return await self.aask_validate(
             question=prompt,
             validator=_validate,
-            max_validations=max_validations,
-            system_message=system_message,
             **kwargs,
         )

@@ -310,21 +325,14 @@
         self,
         instruction: str,
         choices: List[T],
-        max_validations: PositiveInt = 2,
-        system_message: str = "",
-        **kwargs: Unpack[LLMKwargs],
+        **kwargs: Unpack[GenerateKwargs],
     ) -> T:
         """Asynchronously picks a single choice from a list of options using AI validation.

-        This method is a convenience wrapper around `achoose` that always selects exactly one item.
-
         Args:
             instruction (str): The user-provided instruction/question description.
             choices (List[T]): A list of candidate options, requiring elements to have `name` and `briefing` fields.
-            max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
-            system_message (str): Custom system-level prompt, defaults to an empty string.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`,
-                `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+            **kwargs (Unpack[GenerateKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             T: The single selected item from the choices list.
@@ -337,8 +345,6 @@
                 instruction=instruction,
                 choices=choices,
                 k=1,
-                max_validations=max_validations,
-                system_message=system_message,
                 **kwargs,
             )
         )[0]
@@ -348,9 +354,7 @@
         prompt: str,
         affirm_case: str = "",
         deny_case: str = "",
-        max_validations: PositiveInt = 2,
-        system_message: str = "",
-        **kwargs: Unpack[LLMKwargs],
+        **kwargs: Unpack[GenerateKwargs],
     ) -> bool:
         """Asynchronously judges a prompt using AI validation.

@@ -358,16 +362,10 @@
             prompt (str): The input prompt to be judged.
             affirm_case (str): The affirmative case for the AI model. Defaults to an empty string.
             deny_case (str): The negative case for the AI model. Defaults to an empty string.
-            max_validations (PositiveInt): Maximum number of validation attempts. Defaults to 2.
-            system_message (str): System message for the AI model. Defaults to an empty string.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage, such as `model`, `temperature`, `stop`, `top_p`, `max_tokens`, `stream`, `timeout`, and `max_retries`.
+            **kwargs (Unpack[GenerateKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             bool: The judgment result (True or False) based on the AI's response.
-
-        Notes:
-            The method uses an internal validator to ensure the response is a boolean value.
-            If the response cannot be converted to a boolean, it will return None.
         """

         def _validate(response: str) -> bool | None:
@@ -382,8 +380,6 @@
                 {"prompt": prompt, "affirm_case": affirm_case, "deny_case": deny_case},
             ),
             validator=_validate,
-            max_validations=max_validations,
-            system_message=system_message,
             **kwargs,
         )

@@ -448,7 +444,7 @@ class ToolBoxUsage(LLMUsage):
             system_message (str): Custom system-level prompt, defaults to an empty string.
             k (NonNegativeInt): The number of toolboxes to select, 0 means infinite. Defaults to 0.
             max_validations (PositiveInt): Maximum number of validation failures, default is 2.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             List[ToolBox]: The selected toolboxes.
@@ -482,7 +478,7 @@ class ToolBoxUsage(LLMUsage):
             system_message (str): Custom system-level prompt, defaults to an empty string.
             k (NonNegativeInt): The number of tools to select, 0 means infinite. Defaults to 0.
             max_validations (PositiveInt): Maximum number of validation failures, default is 2.
-            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage
+            **kwargs (Unpack[LLMKwargs]): Additional keyword arguments for the LLM usage.

         Returns:
             List[Tool]: The selected tools.
@@ -509,8 +505,8 @@ class ToolBoxUsage(LLMUsage):

         Args:
             task (Task): The task for which to gather tools.
-            box_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing toolboxes
-            tool_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing tools
+            box_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing toolboxes.
+            tool_choose_kwargs (Optional[ChooseKwargs]): Keyword arguments for choosing tools.

         Returns:
             List[Tool]: A list of tools gathered based on the provided task and toolbox and tool selection criteria.
@@ -531,7 +527,7 @@ class ToolBoxUsage(LLMUsage):

         Args:
             task (Task): The task for which to gather tools.
-            **kwargs (Unpack[ChooseKwargs]): Keyword arguments for choosing tools
+            **kwargs (Unpack[ChooseKwargs]): Keyword arguments for choosing tools.

         Returns:
             List[Tool]: A list of tools gathered based on the provided task.
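The new aask_validate_batch simply fans aask_validate out over asyncio.gather, so each question retries independently and results come back in input order. A hedged sketch; `usage` stands for any LLMUsage mixin instance, and the questions and validator are illustrative:

def parse_int(response: str) -> int | None:
    # Reject the sample unless the whole response parses as an integer.
    try:
        return int(response.strip())
    except ValueError:
        return None

async def demo(usage) -> None:
    answers = await usage.aask_validate_batch(
        questions=["Days in a leap year?", "Bits in a byte?"],
        validator=parse_int,
        max_validations=2,  # retry budget per question, from ValidateKwargs
        system_message="Answer with a bare integer.",  # from GenerateKwargs
    )
    print(answers)  # e.g. [366, 8]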
{fabricatio-0.2.1.dev3.data → fabricatio-0.2.2.data}/scripts/tdown.exe
Binary file
{fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fabricatio
-Version: 0.2.1.dev3
+Version: 0.2.2
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: 3.12
@@ -15,6 +15,7 @@ Requires-Dist: gitpython>=3.1.44
 Requires-Dist: litellm>=1.60.0
 Requires-Dist: loguru>=0.7.3
 Requires-Dist: magika>=0.5.1
+Requires-Dist: more-itertools>=10.6.0
 Requires-Dist: orjson>=3.10.15
 Requires-Dist: pydantic>=2.10.6
 Requires-Dist: pydantic-settings>=2.7.1
@@ -22,7 +23,6 @@ Requires-Dist: pymitter>=1.0.0
 Requires-Dist: questionary>=2.1.0
 Requires-Dist: regex>=2024.11.6
 Requires-Dist: rich>=13.9.4
-Requires-Dist: faiss-cpu>=1.10.0 ; extra == 'rag'
 Requires-Dist: pymilvus>=2.5.4 ; extra == 'rag'
 Requires-Dist: fabricatio[rag] ; extra == 'full'
 Provides-Extra: rag
{fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/RECORD
CHANGED
@@ -1,25 +1,26 @@
-fabricatio-0.2.1.dev3.dist-info/METADATA,sha256=…
-fabricatio-0.2.1.dev3.dist-info/WHEEL,sha256=…
-fabricatio-0.2.1.dev3.dist-info/licenses/LICENSE,sha256=…
+fabricatio-0.2.2.dist-info/METADATA,sha256=Xnkw59IiSN4arPkzglC1JKpJACZlGkjbr5raJLdrmb8,12334
+fabricatio-0.2.2.dist-info/WHEEL,sha256=tpW5AN9B-9qsM9WW2FXG2r193YXiqexDadpKp0A2daI,96
+fabricatio-0.2.2.dist-info/licenses/LICENSE,sha256=do7J7EiCGbq0QPbMAL_FqLYufXpHnCnXBOuqVPwSV8Y,1088
 fabricatio/actions/communication.py,sha256=NZxIIncKgJSDyBrqNebUtH_haqtxHa8ld2TZxT3CMdU,429
 fabricatio/actions/transmission.py,sha256=xpvKqbXqgpi1BWy-vUUvmd8NZ1GhRNfsYUBp-l2jLyk,862
 fabricatio/actions/__init__.py,sha256=eFmFVPQvtNgFynIXBVr3eP-vWQDWCPng60YY5LXvZgg,115
-fabricatio/config.py,sha256=…
+fabricatio/capabilities/rating.py,sha256=zmTUvsUfxFgovRQzy4djL2zKRYTHmN6JY7A4lyT5uVQ,14907
+fabricatio/capabilities/task.py,sha256=d2xtrwQxXWI40UskQCR5YhHarY7ST0ppr8TjY12uWQE,5327
+fabricatio/config.py,sha256=wzaaUHZZMRCYc37M_M4qKuLOYtwdEjYtyG77-AGkqCg,11467
 fabricatio/core.py,sha256=yQK2ZrbPYDJOaNDp0Bky3muTkB-ZaQ1ld_Qfflm2dY0,5938
 fabricatio/decorators.py,sha256=uzsP4tFKQNjDHBkofsjjoJA0IUAaYOtt6YVedoyOqlo,6551
 fabricatio/fs/curd.py,sha256=faMstgGUiQ4k2AW3OXfvvWWTldTtKXco7QINYaMjmyA,3981
 fabricatio/fs/readers.py,sha256=Pz1-cdZYtmqr032dsroImlkFXAd0kCYY_9qVpD4UrG4,1045
 fabricatio/fs/__init__.py,sha256=lWcKYg0v3mv2LnnSegOQaTtlVDODU0vtw_s6iKU5IqQ,122
 fabricatio/journal.py,sha256=siqimKF0M_QaaOCMxtjr_BJVNyUIAQWILzE9Q4T6-7c,781
-fabricatio/models/action.py,sha256=…
-fabricatio/models/advanced.py,sha256=xVzHqoO1glNdCqflKZF5FGkK5GQmfIMZpT_07zQi9KM,11420
+fabricatio/models/action.py,sha256=Urs16f99CzRRAwulslgikloOAb_Ln9ZYKunKJpsAzu0,5721
 fabricatio/models/events.py,sha256=mrihNEFgQ5o7qFWja1z_qX8dnaTLwPBoJdVlzxQV5oM,2719
 fabricatio/models/generic.py,sha256=WEjZ96rTyBjaBjkM6e8E4Pg_Naot4xWRvGJteqBiCCI,5133
-fabricatio/models/kwargs_types.py,sha256=…
-fabricatio/models/role.py,sha256=…
-fabricatio/models/task.py,sha256=…
+fabricatio/models/kwargs_types.py,sha256=nTtD3wzSpCg-NlrJ43yW6lmfeWzD2V_XGMPlL5mXzyc,1147
+fabricatio/models/role.py,sha256=gYvleTeKUGDUNKPAC5B0EPMLC4jZ4vHsFHmHiVXkU6c,1830
+fabricatio/models/task.py,sha256=CBZL016mchj7X9Tnd-pqBxJ4mgFYq0tJ8Rq_22zZz4A,9905
 fabricatio/models/tool.py,sha256=WTFnpF6xZ1nJbmIOonLsGQcM-kkDCeZiAFqyil9xg2U,6988
-fabricatio/models/usages.py,sha256=…
+fabricatio/models/usages.py,sha256=iLxas1gE7MA55ZtQJJ-qu3W6JP5KLjPgmNKqNYIF6yU,23972
 fabricatio/models/utils.py,sha256=i_kpcQpct04mQFk1nbcVGV-pl1YThWu4Qk3wbewzKkc,2535
 fabricatio/parser.py,sha256=uLabsvF07wRKW1PoTGuGEENCx3P4mhmuO8JkmOEkKko,3522
 fabricatio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -30,6 +31,6 @@ fabricatio/toolboxes/__init__.py,sha256=b13KmASO8q5fBLwew964fn9oH86ER5g-S1PgA4fZ
 fabricatio/_rust.pyi,sha256=0wCqtwWkVxxoqprvk8T27T8QYKIAKHS7xgsmdMNjQKc,1756
 fabricatio/_rust_instances.py,sha256=dl0-yZ4UvT5g20tQgnPJpmqtkjFGXNG_YK4eLfi_ugQ,279
 fabricatio/__init__.py,sha256=opIrN8lGyT-h2If4Qez0bRuWBa3uIT9GsM9CZy7_XJ0,1100
-fabricatio/_rust.cp312-win_amd64.pyd,sha256=…
-fabricatio-0.2.1.dev3.data/scripts/tdown.exe,sha256=…
-fabricatio-0.2.1.dev3.dist-info/RECORD,,
+fabricatio/_rust.cp312-win_amd64.pyd,sha256=4hsd6r-pYsSUSW2cSq9Ik-SSTYVXacEYygliOFx2sfk,1262080
+fabricatio-0.2.2.data/scripts/tdown.exe,sha256=T_R2-mppH1q8T8LtZdR_pa7QNrdVfrC2nRQuWgalpqQ,3396608
+fabricatio-0.2.2.dist-info/RECORD,,
fabricatio/models/advanced.py
DELETED
@@ -1,289 +0,0 @@
-"""A module for advanced models and functionalities."""
-
-from types import CodeType
-from typing import Any, Dict, List, Optional, Set, Tuple, Unpack
-
-import orjson
-from fabricatio._rust_instances import template_manager
-from fabricatio.config import configs
-from fabricatio.models.generic import WithBriefing
-from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
-from fabricatio.models.task import Task
-from fabricatio.models.tool import Tool, ToolExecutor
-from fabricatio.models.usages import LLMUsage, ToolBoxUsage
-from fabricatio.parser import JsonCapture, PythonCapture
-from loguru import logger
-from pydantic import NonNegativeInt, ValidationError
-
-
-class ProposeTask(WithBriefing, LLMUsage):
-    """A class that proposes a task based on a prompt."""
-
-    async def propose[T](
-        self,
-        prompt: str,
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Task[T]:
-        """Asynchronously proposes a task based on a given prompt and parameters.
-
-        Parameters:
-            prompt: The prompt text for proposing a task, which is a string that must be provided.
-            **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
-
-        Returns:
-            A Task object based on the proposal result.
-        """
-        if not prompt:
-            err = f"{self.name}: Prompt must be provided."
-            logger.error(err)
-            raise ValueError(err)
-
-        def _validate_json(response: str) -> None | Task:
-            try:
-                cap = JsonCapture.capture(response)
-                logger.debug(f"Response: \n{response}")
-                logger.info(f"Captured JSON: \n{cap}")
-                return Task.model_validate_json(cap)
-            except ValidationError as e:
-                logger.error(f"Failed to parse task from JSON: {e}")
-                return None
-
-        template_data = {"prompt": prompt, "json_example": Task.json_example()}
-        return await self.aask_validate(
-            question=template_manager.render_template(configs.templates.propose_task_template, template_data),
-            validator=_validate_json,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
-
-
-class HandleTask(WithBriefing, ToolBoxUsage):
-    """A class that handles a task based on a task object."""
-
-    async def draft_tool_usage_code(
-        self,
-        task: Task,
-        tools: List[Tool],
-        data: Dict[str, Any],
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Tuple[CodeType, List[str]]:
-        """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
-        logger.info(f"Drafting tool usage code for task: {task.briefing}")
-
-        if not tools:
-            err = f"{self.name}: Tools must be provided to draft the tool usage code."
-            logger.error(err)
-            raise ValueError(err)
-
-        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
-            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
-                to_extract := JsonCapture.convert_with(response, orjson.loads)
-            ):
-                return source, to_extract
-
-            return None
-
-        q = template_manager.render_template(
-            configs.templates.draft_tool_usage_code_template,
-            {
-                "data_module_name": configs.toolbox.data_module_name,
-                "tool_module_name": configs.toolbox.tool_module_name,
-                "task": task.briefing,
-                "deps": task.dependencies_prompt,
-                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
-                "data": data,
-            },
-        )
-        logger.debug(f"Code Drafting Question: \n{q}")
-        return await self.aask_validate(
-            question=q,
-            validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
-
-    async def handle_fin_grind(
-        self,
-        task: Task,
-        data: Dict[str, Any],
-        box_choose_kwargs: Optional[ChooseKwargs] = None,
-        tool_choose_kwargs: Optional[ChooseKwargs] = None,
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Optional[Tuple]:
-        """Asynchronously handles a task based on a given task object and parameters."""
-        logger.info(f"Handling task: \n{task.briefing}")
-
-        tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
-        logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
-
-        if tools:
-            executor = ToolExecutor(candidates=tools, data=data)
-            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
-
-            cxt = executor.execute(code)
-            if to_extract:
-                return tuple(cxt.get(k) for k in to_extract)
-
-        return None
-
-    async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
-        """Asynchronously handles a task based on a given task object and parameters."""
-        return await self.handle_fin_grind(task, data, **kwargs)
-
-
-class GiveRating(WithBriefing, LLMUsage):
-    """A class that provides functionality to rate tasks based on a rating manual and score range."""
-
-    async def rate_fine_grind(
-        self,
-        to_rate: str,
-        rating_manual: Dict[str, str],
-        score_range: Tuple[float, float],
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Dict[str, float]:
-        """Rates a given task based on a rating manual and score range.
-
-        Args:
-            to_rate: The task to be rated.
-            rating_manual: A dictionary containing the rating criteria.
-            score_range: A tuple representing the valid score range.
-            **kwargs: Additional keyword arguments for the LLM usage.
-
-        Returns:
-            A dictionary with the ratings for each dimension.
-        """
-
-        def _validator(response: str) -> Dict[str, float] | None:
-            if (
-                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
-                and isinstance(json_data, dict)
-                and json_data.keys() == rating_manual.keys()
-                and all(isinstance(v, float) for v in json_data.values())
-                and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
-            ):
-                return json_data
-            return None
-
-        return await self.aask_validate(
-            question=(
-                template_manager.render_template(
-                    configs.templates.rate_fine_grind_template,
-                    {
-                        "to_rate": to_rate,
-                        "min_score": score_range[0],
-                        "max_score": score_range[1],
-                        "rating_manual": rating_manual,
-                    },
-                )
-            ),
-            validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
-
-    async def rate(
-        self,
-        to_rate: str,
-        topic: str,
-        dimensions: Set[str],
-        score_range: Tuple[float, float] = (0.0, 1.0),
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Dict[str, float]:
-        """Rates a task based on a topic and dimensions. this function will automatically draft a rating manual based on the topic and dimensions.
-
-        Args:
-            to_rate: The task to be rated.
-            topic: The topic related to the task.
-            dimensions: A set of dimensions for rating.
-            score_range: A tuple representing the valid score range
-            **kwargs: Additional keyword arguments for the LLM usage.
-
-        Returns:
-            A dictionary with the ratings for each dimension.
-        """
-        manual = await self.draft_rating_manual(topic, dimensions, **kwargs)
-        return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
-
-    async def draft_rating_manual(
-        self, topic: str, dimensions: Set[str], **kwargs: Unpack[ValidateKwargs]
-    ) -> Dict[str, str]:
-        """Drafts a rating manual based on a topic and dimensions.
-
-        Args:
-            topic: The topic for the rating manual.
-            dimensions: A set of dimensions for the rating manual.
-            **kwargs: Additional keyword arguments for the LLM usage.
-
-        Returns:
-            A dictionary representing the drafted rating manual.
-        """
-
-        def _validator(response: str) -> Dict[str, str] | None:
-            if (
-                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
-                and isinstance(json_data, dict)
-                and json_data.keys() == dimensions
-                and all(isinstance(v, str) for v in json_data.values())
-            ):
-                return json_data
-            return None
-
-        return await self.aask_validate(
-            question=(
-                template_manager.render_template(
-                    configs.templates.draft_rating_manual_template,
-                    {
-                        "topic": topic,
-                        "dimensions": dimensions,
-                    },
-                )
-            ),
-            validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
-
-    async def draft_rating_dimensions(
-        self,
-        topic: str,
-        dimensions_count: NonNegativeInt = 0,
-        examples: Optional[List[str]] = None,
-        **kwargs: Unpack[ValidateKwargs],
-    ) -> Set[str]:
-        """Drafts rating dimensions based on a topic.
-
-        Args:
-            topic: The topic for the rating dimensions.
-            dimensions_count: The number of dimensions to draft, 0 means no limit.
-            examples: A list of examples which is rated based on the rating dimensions.
-            **kwargs: Additional keyword arguments for the LLM usage.
-
-        Returns:
-            A set of rating dimensions.
-        """
-
-        def _validator(response: str) -> Set[str] | None:
-            if (
-                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
-                and isinstance(json_data, list)
-                and all(isinstance(v, str) for v in json_data)
-                and (dimensions_count == 0 or len(json_data) == dimensions_count)
-            ):
-                return set(json_data)
-            return None
-
-        return await self.aask_validate(
-            question=(
-                template_manager.render_template(
-                    configs.templates.draft_rating_dimensions_template,
-                    {
-                        "topic": topic,
-                        "examples": examples,
-                        "dimensions_count": dimensions_count,
-                    },
-                )
-            ),
-            validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
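For downstream code, the practical consequence of this deletion is an import move: the three classes survive in the new capabilities package (with rate()'s `dimensions` parameter renamed to `criteria` and draft_rating_dimensions superseded by draft_rating_criteria). A minimal migration sketch, taken directly from the role.py and action.py hunks above:

# Before (0.2.1.dev3):
# from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask

# After (0.2.2):
from fabricatio.capabilities.rating import GiveRating
from fabricatio.capabilities.task import HandleTask, ProposeTask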
{fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/WHEEL
File without changes

{fabricatio-0.2.1.dev3.dist-info → fabricatio-0.2.2.dist-info}/licenses/LICENSE
File without changes