fabricatio-0.3.15.dev5-cp312-cp312-win_amd64.whl → fabricatio-0.4.5.dev0-cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. fabricatio/__init__.py +7 -8
  2. fabricatio/actions/__init__.py +69 -1
  3. fabricatio/capabilities/__init__.py +63 -1
  4. fabricatio/models/__init__.py +51 -0
  5. fabricatio/rust.cp312-win_amd64.pyd +0 -0
  6. fabricatio/toolboxes/__init__.py +2 -1
  7. fabricatio/toolboxes/arithmetic.py +1 -1
  8. fabricatio/toolboxes/fs.py +2 -2
  9. fabricatio/workflows/__init__.py +9 -0
  10. fabricatio-0.4.5.dev0.data/scripts/tdown.exe +0 -0
  11. {fabricatio-0.3.15.dev5.dist-info → fabricatio-0.4.5.dev0.dist-info}/METADATA +58 -27
  12. fabricatio-0.4.5.dev0.dist-info/RECORD +15 -0
  13. fabricatio/actions/article.py +0 -415
  14. fabricatio/actions/article_rag.py +0 -407
  15. fabricatio/actions/fs.py +0 -25
  16. fabricatio/actions/output.py +0 -247
  17. fabricatio/actions/rag.py +0 -96
  18. fabricatio/actions/rules.py +0 -83
  19. fabricatio/capabilities/advanced_judge.py +0 -20
  20. fabricatio/capabilities/advanced_rag.py +0 -61
  21. fabricatio/capabilities/censor.py +0 -105
  22. fabricatio/capabilities/check.py +0 -212
  23. fabricatio/capabilities/correct.py +0 -228
  24. fabricatio/capabilities/extract.py +0 -74
  25. fabricatio/capabilities/propose.py +0 -65
  26. fabricatio/capabilities/rag.py +0 -264
  27. fabricatio/capabilities/rating.py +0 -404
  28. fabricatio/capabilities/review.py +0 -114
  29. fabricatio/capabilities/task.py +0 -113
  30. fabricatio/decorators.py +0 -253
  31. fabricatio/emitter.py +0 -177
  32. fabricatio/fs/__init__.py +0 -35
  33. fabricatio/fs/curd.py +0 -153
  34. fabricatio/fs/readers.py +0 -61
  35. fabricatio/journal.py +0 -12
  36. fabricatio/models/action.py +0 -263
  37. fabricatio/models/adv_kwargs_types.py +0 -63
  38. fabricatio/models/extra/__init__.py +0 -1
  39. fabricatio/models/extra/advanced_judge.py +0 -32
  40. fabricatio/models/extra/aricle_rag.py +0 -286
  41. fabricatio/models/extra/article_base.py +0 -488
  42. fabricatio/models/extra/article_essence.py +0 -98
  43. fabricatio/models/extra/article_main.py +0 -285
  44. fabricatio/models/extra/article_outline.py +0 -45
  45. fabricatio/models/extra/article_proposal.py +0 -52
  46. fabricatio/models/extra/patches.py +0 -20
  47. fabricatio/models/extra/problem.py +0 -165
  48. fabricatio/models/extra/rag.py +0 -98
  49. fabricatio/models/extra/rule.py +0 -51
  50. fabricatio/models/generic.py +0 -904
  51. fabricatio/models/kwargs_types.py +0 -121
  52. fabricatio/models/role.py +0 -156
  53. fabricatio/models/task.py +0 -310
  54. fabricatio/models/tool.py +0 -328
  55. fabricatio/models/usages.py +0 -791
  56. fabricatio/parser.py +0 -114
  57. fabricatio/rust.pyi +0 -846
  58. fabricatio/utils.py +0 -156
  59. fabricatio/workflows/articles.py +0 -24
  60. fabricatio/workflows/rag.py +0 -11
  61. fabricatio-0.3.15.dev5.data/scripts/tdown.exe +0 -0
  62. fabricatio-0.3.15.dev5.data/scripts/ttm.exe +0 -0
  63. fabricatio-0.3.15.dev5.dist-info/RECORD +0 -63
  64. {fabricatio-0.3.15.dev5.dist-info → fabricatio-0.4.5.dev0.dist-info}/WHEEL +0 -0
  65. {fabricatio-0.3.15.dev5.dist-info → fabricatio-0.4.5.dev0.dist-info}/licenses/LICENSE +0 -0
fabricatio/capabilities/rating.py
@@ -1,404 +0,0 @@
- """A module that provides functionality to rate tasks based on a rating manual and score range."""
-
- from abc import ABC
- from itertools import permutations
- from random import sample
- from typing import Dict, List, Optional, Set, Tuple, Union, Unpack, overload
-
- from more_itertools import flatten, windowed
- from pydantic import Field, NonNegativeInt, PositiveInt, create_model
-
- from fabricatio.capabilities.propose import Propose
- from fabricatio.journal import logger
- from fabricatio.models.generic import Display, ProposedAble
- from fabricatio.models.kwargs_types import CompositeScoreKwargs, ValidateKwargs
- from fabricatio.parser import JsonCapture
- from fabricatio.rust import CONFIG, TEMPLATE_MANAGER
- from fabricatio.utils import ok, override_kwargs
-
-
- class Rating(Propose, ABC):
-     """A class that provides functionality to rate tasks based on a rating manual and score range.
-
-     References:
-         Lu X, Li J, Takeuchi K, et al. AHP-powered LLM reasoning for multi-criteria evaluation of open-ended responses[A/OL]. arXiv, 2024. DOI: 10.48550/arXiv.2410.01246.
-     """
-
-     async def rate_fine_grind(
-         self,
-         to_rate: str | List[str],
-         rating_manual: Dict[str, str],
-         score_range: Tuple[float, float],
-         **kwargs: Unpack[ValidateKwargs[Dict[str, float]]],
-     ) -> Dict[str, float] | List[Dict[str, float]] | List[Optional[Dict[str, float]]] | None:
-         """Rate a given string or list of strings based on a rating manual and score range.
-
-         Args:
-             to_rate (str | List[str]): The string or list of strings to be rated.
-             rating_manual (Dict[str, str]): A dictionary containing the rating criteria.
-             score_range (Tuple[float, float]): A tuple representing the valid score range.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Dict[str, float]: A dictionary with the ratings for each dimension.
-         """
-         min_score, max_score = score_range
-         tip = (max_score - min_score) / 9
-
-         model = create_model(  # pyright: ignore [reportCallIssue]
-             "RatingResult",
-             __base__=ProposedAble,
-             __doc__=f"The rating result contains the scores against each criterion, with min_score={min_score} and max_score={max_score}.",
-             **{  # pyright: ignore [reportArgumentType]
-                 criterion: (
-                     float,
-                     Field(
-                         ge=min_score,
-                         le=max_score,
-                         description=desc,
-                         examples=[round(min_score + tip * i, 2) for i in range(10)],
-                     ),
-                 )
-                 for criterion, desc in rating_manual.items()
-             },
-         )
-
-         res = await self.propose(
-             model,
-             TEMPLATE_MANAGER.render_template(
-                 CONFIG.templates.rate_fine_grind_template,
-                 {"to_rate": to_rate, "min_score": min_score, "max_score": max_score},
-             )
-             if isinstance(to_rate, str)
-             else [
-                 TEMPLATE_MANAGER.render_template(
-                     CONFIG.templates.rate_fine_grind_template,
-                     {"to_rate": t, "min_score": min_score, "max_score": max_score},
-                 )
-                 for t in to_rate
-             ],
-             **override_kwargs(kwargs, default=None),
-         )
-         default = kwargs.get("default")
-         if isinstance(res, list):
-             return [r.model_dump() if r else default for r in res]
-         if res is None:
-             return default
-         return res.model_dump()
-
-     @overload
-     async def rate(
-         self,
-         to_rate: str,
-         topic: str,
-         criteria: Set[str],
-         manual: Optional[Dict[str, str]] = None,
-         score_range: Tuple[float, float] = (0.0, 1.0),
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Dict[str, float]: ...
-
-     @overload
-     async def rate(
-         self,
-         to_rate: List[str],
-         topic: str,
-         criteria: Set[str],
-         manual: Optional[Dict[str, str]] = None,
-         score_range: Tuple[float, float] = (0.0, 1.0),
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> List[Dict[str, float]]: ...
-
-     async def rate(
-         self,
-         to_rate: Union[str, List[str]],
-         topic: str,
-         criteria: Set[str],
-         manual: Optional[Dict[str, str]] = None,
-         score_range: Tuple[float, float] = (0.0, 1.0),
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Dict[str, float] | List[Dict[str, float]] | List[Optional[Dict[str, float]]] | None:
-         """Rate a given string or a sequence of strings based on a topic, criteria, and score range.
-
-         Args:
-             to_rate (Union[str, List[str]]): The string or sequence of strings to be rated.
-             topic (str): The topic related to the task.
-             criteria (Set[str]): A set of criteria for rating.
-             manual (Optional[Dict[str, str]]): A dictionary containing the rating manual, mapping each criterion to its description. If not provided, this method will draft the manual automatically.
-             score_range (Tuple[float, float], optional): A tuple representing the valid score range. Defaults to (0.0, 1.0).
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Union[Dict[str, float], List[Dict[str, float]]]: A dictionary with the ratings for each criterion if a single string is provided,
-             or a list of dictionaries with the ratings for each criterion if a sequence of strings is provided.
-         """
-         manual = (
-             manual
-             or await self.draft_rating_manual(topic, criteria, **override_kwargs(kwargs, default=None))
-             or dict(zip(criteria, criteria, strict=True))
-         )
-
-         return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
-
-     async def draft_rating_manual(
-         self, topic: str, criteria: Optional[Set[str]] = None, **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
-     ) -> Optional[Dict[str, str]]:
-         """Drafts a rating manual based on a topic and dimensions.
-
-         Args:
-             topic (str): The topic for the rating manual.
-             criteria (Optional[Set[str]], optional): A set of criteria for the rating manual. If not specified, then this method will draft the criteria automatically.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Dict[str, str]: A dictionary representing the drafted rating manual.
-         """
-
-         def _validator(response: str) -> Dict[str, str] | None:
-             if (
-                 (json_data := JsonCapture.validate_with(response, target_type=dict, elements_type=str)) is not None
-                 and json_data.keys() == criteria
-                 and all(isinstance(v, str) for v in json_data.values())
-             ):
-                 return json_data
-             return None
-
-         criteria = criteria or await self.draft_rating_criteria(topic, **override_kwargs(dict(kwargs), default=None))
-
-         if criteria is None:
-             logger.error(f"Failed to draft rating criteria for topic {topic}")
-             return None
-
-         return await self.aask_validate(
-             question=(
-                 TEMPLATE_MANAGER.render_template(
-                     CONFIG.templates.draft_rating_manual_template,
-                     {
-                         "topic": topic,
-                         "criteria": list(criteria),
-                     },
-                 )
-             ),
-             validator=_validator,
-             **kwargs,
-         )
-
-     async def draft_rating_criteria(
-         self,
-         topic: str,
-         criteria_count: NonNegativeInt = 0,
-         **kwargs: Unpack[ValidateKwargs[Set[str]]],
-     ) -> Optional[Set[str]]:
-         """Drafts rating dimensions based on a topic.
-
-         Args:
-             topic (str): The topic for the rating dimensions.
-             criteria_count (NonNegativeInt, optional): The number of dimensions to draft, 0 means no limit. Defaults to 0.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Set[str]: A set of rating dimensions.
-         """
-         return await self.aask_validate(
-             question=(
-                 TEMPLATE_MANAGER.render_template(
-                     CONFIG.templates.draft_rating_criteria_template,
-                     {
-                         "topic": topic,
-                         "criteria_count": criteria_count,
-                     },
-                 )
-             ),
-             validator=lambda resp: set(out)
-             if (out := JsonCapture.validate_with(resp, list, str, criteria_count)) is not None
-             else out,
-             **kwargs,
-         )
-
-     async def draft_rating_criteria_from_examples(
-         self,
-         topic: str,
-         examples: List[str],
-         m: NonNegativeInt = 0,
-         reasons_count: PositiveInt = 2,
-         criteria_count: PositiveInt = 5,
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Optional[Set[str]]:
-         """Asynchronously drafts a set of rating criteria based on provided examples.
-
-         This function generates rating criteria by analyzing examples and extracting reasons for comparison,
-         then further condensing these reasons into a specified number of criteria.
-
-         Parameters:
-             topic (str): The subject topic for the rating criteria.
-             examples (List[str]): A list of example texts to analyze.
-             m (NonNegativeInt, optional): The number of examples to sample from the provided list. Defaults to 0 (no sampling).
-             reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
-             criteria_count (PositiveInt, optional): The final number of rating criteria to draft. Defaults to 5.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for validation.
-
-         Returns:
-             Set[str]: A set of drafted rating criteria.
-
-         Warnings:
-             Since this function uses pairwise comparisons, it may not be suitable for large lists of examples.
-             For that reason, consider using a smaller list of examples or setting `m` to a non-zero value smaller than the length of the examples.
-         """
-         if m:
-             examples = sample(examples, m)
-
-         # extract reasons from comparisons of ordered pairs drawn from the examples
-         reasons = flatten(
-             await self.aask_validate(  # pyright: ignore [reportArgumentType]
-                 question=[
-                     TEMPLATE_MANAGER.render_template(
-                         CONFIG.templates.extract_reasons_from_examples_template,
-                         {
-                             "topic": topic,
-                             "first": pair[0],
-                             "second": pair[1],
-                             "reasons_count": reasons_count,
-                         },
-                     )
-                     for pair in (permutations(examples, 2))
-                 ],
-                 validator=lambda resp: JsonCapture.validate_with(
-                     resp, target_type=list, elements_type=str, length=reasons_count
-                 ),
-                 **kwargs,
-             )
-         )
-         # condense the reasons into a fixed number of criteria according to their importance and frequency
-         return await self.aask_validate(
-             question=(
-                 TEMPLATE_MANAGER.render_template(
-                     CONFIG.templates.extract_criteria_from_reasons_template,
-                     {
-                         "topic": topic,
-                         "reasons": list(reasons),
-                         "criteria_count": criteria_count,
-                     },
-                 )
-             ),
-             validator=lambda resp: set(out)
-             if (out := JsonCapture.validate_with(resp, target_type=list, elements_type=str, length=criteria_count))
-             else None,
-             **kwargs,
-         )
-
-     async def drafting_rating_weights_klee(
-         self,
-         topic: str,
-         criteria: Set[str],
-         **kwargs: Unpack[ValidateKwargs[float]],
-     ) -> Dict[str, float]:
-         """Drafts rating weights for a given topic and criteria using the Klee method.
-
-         Args:
-             topic (str): The topic for the rating weights.
-             criteria (Set[str]): A set of criteria for the rating weights.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Dict[str, float]: A dictionary representing the drafted rating weights for each criterion.
-         """
-         if len(criteria) < 2:  # noqa: PLR2004
-             raise ValueError("At least two criteria are required to draft rating weights")
-
-         criteria_seq = list(criteria)  # freeze the order
-         windows = windowed(criteria_seq, 2)
-
-         # get the importance multiplier indicating how important the second criterion is compared to the first one
-         relative_weights = await self.aask_validate(
-             question=[
-                 TEMPLATE_MANAGER.render_template(
-                     CONFIG.templates.draft_rating_weights_klee_template,
-                     {
-                         "topic": topic,
-                         "first": pair[0],
-                         "second": pair[1],
-                     },
-                 )
-                 for pair in windows
-             ],
-             validator=lambda resp: JsonCapture.validate_with(resp, target_type=float),
-             **kwargs,
-         )
-         if not all(relative_weights):
-             raise ValueError(f"found illegal weight: {relative_weights}")
-         weights = [1.0]
-         for rw in relative_weights:
-             weights.append(weights[-1] * rw)  # pyright: ignore [reportOperatorIssue]
-         total = sum(weights)
-         return dict(zip(criteria_seq, [w / total for w in weights], strict=True))
-
-     async def composite_score(
-         self,
-         topic: str,
-         to_rate: List[str],
-         criteria: Optional[Set[str]] = None,
-         weights: Optional[Dict[str, float]] = None,
-         manual: Optional[Dict[str, str]] = None,
-         approx: bool = False,
-         **kwargs: Unpack[ValidateKwargs[List[Dict[str, float]]]],
-     ) -> List[float]:
-         """Calculates the composite scores for a list of items based on a given topic and criteria.
-
-         Args:
-             topic (str): The topic for the rating.
-             to_rate (List[str]): A list of strings to be rated.
-             criteria (Optional[Set[str]]): A set of criteria for the rating. Defaults to None.
-             weights (Optional[Dict[str, float]]): A dictionary of rating weights for each criterion. Defaults to None.
-             manual (Optional[Dict[str, str]]): A rating manual mapping each criterion to its description. Defaults to None.
-             approx (bool): Whether to use approximate rating criteria. Defaults to False.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             List[float]: A list of composite scores for the items.
-         """
-         criteria = ok(
-             criteria
-             or (await self.draft_rating_criteria(topic, **override_kwargs(kwargs, default=None)) if approx else None)
-             or await self.draft_rating_criteria_from_examples(topic, to_rate, **override_kwargs(kwargs, default=None))
-         )
-         weights = ok(
-             weights or await self.drafting_rating_weights_klee(topic, criteria, **override_kwargs(kwargs, default=None))
-         )
-         logger.info(f"Criteria: {criteria}\nWeights: {weights}")
-         ratings_seq = await self.rate(to_rate, topic, criteria, manual, **kwargs)
-
-         return [sum(ratings[c] * weights[c] for c in criteria) for ratings in ratings_seq]
-
-     @overload
-     async def best(self, candidates: List[str], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]) -> List[str]: ...
-
-     @overload
-     async def best[T: Display](
-         self, candidates: List[T], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]
-     ) -> List[T]: ...
-
-     async def best[T: Display](
-         self, candidates: List[str] | List[T], k: int = 1, **kwargs: Unpack[CompositeScoreKwargs]
-     ) -> Optional[List[str] | List[T]]:
-         """Choose the best candidates from the list of candidates based on the composite score.
-
-         Args:
-             candidates (List[str]): A list of candidates to choose from.
-             k (int): The number of best candidates to choose.
-             **kwargs (CompositeScoreKwargs): Additional keyword arguments for the composite score calculation.
-
-         Returns:
-             List[str]: The best candidates.
-         """
-         if (leng := len(candidates)) == 0:
-             logger.warning(f"No candidates, got {leng}, return None.")
-             return None
-
-         if leng == 1:
-             logger.warning(f"Only one candidate, got {leng}, return it.")
-             return candidates
-         logger.info(f"Choose best {k} from {leng} candidates.")
-
-         rating_seq = await self.composite_score(
-             to_rate=[c.display() if isinstance(c, Display) else c for c in candidates], **kwargs
-         )
-         return [a[0] for a in sorted(zip(candidates, rating_seq, strict=True), key=lambda x: x[1], reverse=True)[:k]]  # pyright: ignore [reportReturnType]
fabricatio/capabilities/review.py
@@ -1,114 +0,0 @@
- """A module that provides functionality to review tasks and strings using a language model."""
-
- from abc import ABC
- from typing import Dict, Optional, Set, Unpack
-
- from fabricatio.capabilities.propose import Propose
- from fabricatio.capabilities.rating import Rating
- from fabricatio.models.extra.problem import Improvement
- from fabricatio.models.generic import Display, WithBriefing
- from fabricatio.models.kwargs_types import ReviewKwargs, ValidateKwargs
- from fabricatio.models.task import Task
- from fabricatio.rust import CONFIG, TEMPLATE_MANAGER
- from fabricatio.utils import ok
-
-
- class Review(Rating, Propose, ABC):
-     """Class that provides functionality to review tasks and strings using a language model.
-
-     This class extends the Rating and Propose capabilities to analyze content,
-     identify problems, and suggest solutions based on specified criteria.
-
-     The review process can be applied to Task objects or plain strings with
-     appropriate topic and criteria.
-     """
-
-     async def review_task[T](self, task: Task[T], **kwargs: Unpack[ReviewKwargs]) -> Optional[Improvement]:
-         """Review a task using specified review criteria.
-
-         This method analyzes a task object to identify problems and propose solutions
-         based on the criteria provided in kwargs.
-
-         Args:
-             task (Task[T]): The task object to be reviewed.
-             **kwargs (Unpack[ReviewKwargs]): Additional keyword arguments for the review process,
-                 including topic and optional criteria.
-
-         Returns:
-             Optional[Improvement]: A review result containing identified problems and proposed solutions,
-             with a reference to the original task.
-         """
-         return await self.review_obj(task, **kwargs)
-
-     async def review_string(
-         self,
-         input_text: str,
-         topic: str,
-         criteria: Optional[Set[str]] = None,
-         rating_manual: Optional[Dict[str, str]] = None,
-         **kwargs: Unpack[ValidateKwargs[Improvement]],
-     ) -> Optional[Improvement]:
-         """Review a string based on specified topic and criteria.
-
-         This method analyzes a text string to identify problems and propose solutions
-         based on the given topic and criteria.
-
-         Args:
-             input_text (str): The text content to be reviewed.
-             topic (str): The subject topic for the review criteria.
-             criteria (Optional[Set[str]], optional): A set of criteria for the review.
-                 If not provided, criteria will be drafted automatically. Defaults to None.
-             rating_manual (Optional[Dict[str, str]], optional): A rating manual mapping each criterion to its description.
-             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
-
-         Returns:
-             Improvement: A review result containing identified problems and proposed solutions,
-             with a reference to the original text.
-         """
-         default = None
-         if "default" in kwargs:
-             # this `default` is the default for the `propose` method
-             default = kwargs.pop("default")
-
-         criteria = ok(criteria or (await self.draft_rating_criteria(topic, **kwargs)), "No criteria could be used.")
-         manual = rating_manual or await self.draft_rating_manual(topic, criteria, **kwargs)
-
-         if default is not None:
-             kwargs["default"] = default
-         return await self.propose(
-             Improvement,
-             TEMPLATE_MANAGER.render_template(
-                 CONFIG.templates.review_string_template,
-                 {"text": input_text, "topic": topic, "criteria_manual": manual},
-             ),
-             **kwargs,
-         )
-
-     async def review_obj[M: (Display, WithBriefing)](
-         self, obj: M, **kwargs: Unpack[ReviewKwargs[Improvement]]
-     ) -> Optional[Improvement]:
-         """Review an object that implements the Display or WithBriefing interface.
-
-         This method extracts displayable text from the object and performs a review
-         based on the criteria provided in kwargs.
-
-         Args:
-             obj (M): The object to be reviewed, which must implement either Display or WithBriefing.
-             **kwargs (Unpack[ReviewKwargs]): Additional keyword arguments for the review process,
-                 including topic and optional criteria.
-
-         Raises:
-             TypeError: If the object does not implement Display or WithBriefing.
-
-         Returns:
-             Improvement: A review result containing identified problems and proposed solutions,
-             with a reference to the original object.
-         """
-         if isinstance(obj, Display):
-             text_to_review = obj.display()
-         elif isinstance(obj, WithBriefing):
-             text_to_review = obj.briefing
-         else:
-             raise TypeError(f"Unsupported type for review: {type(obj)}")
-
-         return await self.review_string(text_to_review, **kwargs)
fabricatio/capabilities/task.py
@@ -1,113 +0,0 @@
- """A module for the task capabilities of the Fabricatio library."""
-
- from abc import ABC
- from types import CodeType
- from typing import Any, Dict, List, Optional, Tuple, Unpack
-
- import ujson
-
- from fabricatio.capabilities.propose import Propose
- from fabricatio.journal import logger
- from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
- from fabricatio.models.task import Task
- from fabricatio.models.tool import Tool, ToolExecutor
- from fabricatio.models.usages import ToolBoxUsage
- from fabricatio.parser import JsonCapture, PythonCapture
- from fabricatio.rust import CONFIG, TEMPLATE_MANAGER
-
-
- class ProposeTask(Propose, ABC):
-     """A class that proposes a task based on a prompt."""
-
-     async def propose_task[T](
-         self,
-         prompt: str,
-         **kwargs: Unpack[ValidateKwargs[Task[T]]],
-     ) -> Optional[Task[T]]:
-         """Asynchronously proposes a task based on a given prompt and parameters.
-
-         Parameters:
-             prompt: The prompt text for proposing a task; must be a non-empty string.
-             **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
-
-         Returns:
-             A Task object based on the proposal result.
-         """
-         if not prompt:
-             logger.error(err := "Prompt must be provided.")
-             raise ValueError(err)
-
-         return await self.propose(Task, prompt, **kwargs)
-
-
- class HandleTask(ToolBoxUsage, ABC):
-     """A class that handles a task based on a task object."""
-
-     async def draft_tool_usage_code(
-         self,
-         task: Task,
-         tools: List[Tool],
-         data: Dict[str, Any],
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Optional[Tuple[CodeType, List[str]]]:
-         """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
-         logger.info(f"Drafting tool usage code for task: {task.briefing}")
-
-         if not tools:
-             err = "Tools must be provided to draft the tool usage code."
-             logger.error(err)
-             raise ValueError(err)
-
-         def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
-             if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
-                 to_extract := JsonCapture.convert_with(response, ujson.loads)
-             ):
-                 return source, to_extract
-
-             return None
-
-         q = TEMPLATE_MANAGER.render_template(
-             CONFIG.templates.draft_tool_usage_code_template,
-             {
-                 "data_module_name": CONFIG.toolbox.data_module_name,
-                 "tool_module_name": CONFIG.toolbox.tool_module_name,
-                 "task": task.briefing,
-                 "deps": task.dependencies_prompt,
-                 "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
-                 "data": data,
-             },
-         )
-         logger.debug(f"Code Drafting Question: \n{q}")
-         return await self.aask_validate(
-             question=q,
-             validator=_validator,
-             **kwargs,
-         )
-
-     async def handle_fine_grind(
-         self,
-         task: Task,
-         data: Dict[str, Any],
-         box_choose_kwargs: Optional[ChooseKwargs] = None,
-         tool_choose_kwargs: Optional[ChooseKwargs] = None,
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Optional[Tuple]:
-         """Asynchronously handles a task based on a given task object and parameters."""
-         logger.info(f"Handling task: \n{task.briefing}")
-
-         tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
-         logger.info(f"Gathered {[t.name for t in tools]}")
-
-         if tools and (pack := await self.draft_tool_usage_code(task, tools, data, **kwargs)):
-             executor = ToolExecutor(candidates=tools, data=data)
-
-             code, to_extract = pack
-             cxt = executor.execute(code)
-             if to_extract:
-                 return tuple(cxt.get(k) for k in to_extract)
-
-         return None
-
-     async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
-         """Asynchronously handles a task based on a given task object and parameters."""
-         return await self.handle_fine_grind(task, data, **kwargs)