fabricatio 0.2.4.dev2__cp312-cp312-win_amd64.whl → 0.2.5__cp312-cp312-win_amd64.whl

Files changed (39)
  1. fabricatio/__init__.py +14 -5
  2. fabricatio/_rust.cp312-win_amd64.pyd +0 -0
  3. fabricatio/_rust.pyi +65 -16
  4. fabricatio/_rust_instances.py +2 -0
  5. fabricatio/actions/article.py +46 -14
  6. fabricatio/actions/output.py +21 -0
  7. fabricatio/actions/rag.py +1 -1
  8. fabricatio/capabilities/propose.py +14 -20
  9. fabricatio/capabilities/rag.py +85 -26
  10. fabricatio/capabilities/rating.py +59 -51
  11. fabricatio/capabilities/review.py +241 -0
  12. fabricatio/capabilities/task.py +7 -8
  13. fabricatio/config.py +36 -4
  14. fabricatio/fs/__init__.py +13 -1
  15. fabricatio/fs/curd.py +27 -8
  16. fabricatio/fs/readers.py +6 -3
  17. fabricatio/journal.py +1 -1
  18. fabricatio/models/action.py +6 -8
  19. fabricatio/models/events.py +6 -4
  20. fabricatio/models/extra.py +100 -25
  21. fabricatio/models/generic.py +56 -4
  22. fabricatio/models/kwargs_types.py +123 -35
  23. fabricatio/models/role.py +3 -3
  24. fabricatio/models/task.py +0 -14
  25. fabricatio/models/tool.py +7 -6
  26. fabricatio/models/usages.py +144 -101
  27. fabricatio/parser.py +26 -5
  28. fabricatio/toolboxes/__init__.py +1 -3
  29. fabricatio/toolboxes/fs.py +17 -1
  30. fabricatio/workflows/articles.py +10 -6
  31. fabricatio/workflows/rag.py +11 -0
  32. fabricatio-0.2.5.data/scripts/tdown.exe +0 -0
  33. {fabricatio-0.2.4.dev2.dist-info → fabricatio-0.2.5.dist-info}/METADATA +2 -1
  34. fabricatio-0.2.5.dist-info/RECORD +41 -0
  35. fabricatio/toolboxes/task.py +0 -6
  36. fabricatio-0.2.4.dev2.data/scripts/tdown.exe +0 -0
  37. fabricatio-0.2.4.dev2.dist-info/RECORD +0 -39
  38. {fabricatio-0.2.4.dev2.dist-info → fabricatio-0.2.5.dist-info}/WHEEL +0 -0
  39. {fabricatio-0.2.4.dev2.dist-info → fabricatio-0.2.5.dist-info}/licenses/LICENSE +0 -0
fabricatio/capabilities/rating.py CHANGED
@@ -1,15 +1,14 @@
  """A module that provides functionality to rate tasks based on a rating manual and score range."""

- from asyncio import gather
  from itertools import permutations
- from typing import Dict, List, Set, Tuple, Union, Unpack, overload
+ from random import sample
+ from typing import Dict, List, Optional, Set, Tuple, Union, Unpack, overload

- import orjson
  from fabricatio._rust_instances import template_manager
  from fabricatio.config import configs
  from fabricatio.journal import logger
  from fabricatio.models.generic import WithBriefing
- from fabricatio.models.kwargs_types import GenerateKwargs, ValidateKwargs
+ from fabricatio.models.kwargs_types import ValidateKwargs
  from fabricatio.models.usages import LLMUsage
  from fabricatio.parser import JsonCapture
  from more_itertools import flatten, windowed
@@ -17,15 +16,19 @@ from pydantic import NonNegativeInt, PositiveInt


  class GiveRating(WithBriefing, LLMUsage):
-     """A class that provides functionality to rate tasks based on a rating manual and score range."""
+     """A class that provides functionality to rate tasks based on a rating manual and score range.
+
+     References:
+         Lu X, Li J, Takeuchi K, et al. AHP-powered LLM reasoning for multi-criteria evaluation of open-ended responses[A/OL]. arXiv, 2024. DOI: 10.48550/arXiv.2410.01246.
+     """

      async def rate_fine_grind(
          self,
-         to_rate: str,
+         to_rate: str | List[str],
          rating_manual: Dict[str, str],
          score_range: Tuple[float, float],
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Dict[str, float]:
+         **kwargs: Unpack[ValidateKwargs[Dict[str, float]]],
+     ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
          """Rate a given string based on a rating manual and score range.

          Args:
@@ -40,10 +43,8 @@ class GiveRating(WithBriefing, LLMUsage):

          def _validator(response: str) -> Dict[str, float] | None:
              if (
-                 (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
-                 and isinstance(json_data, dict)
+                 (json_data := JsonCapture.validate_with(response, dict, str))
                  and json_data.keys() == rating_manual.keys()
-                 and all(isinstance(v, float) for v in json_data.values())
                  and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
              ):
                  return json_data
@@ -61,9 +62,21 @@ class GiveRating(WithBriefing, LLMUsage):
                          "rating_manual": rating_manual,
                      },
                  )
-             ),
+             )
+             if isinstance(to_rate, str)
+             else [
+                 template_manager.render_template(
+                     configs.templates.rate_fine_grind_template,
+                     {
+                         "to_rate": item,
+                         "min_score": score_range[0],
+                         "max_score": score_range[1],
+                         "rating_manual": rating_manual,
+                     },
+                 )
+                 for item in to_rate
+             ],
              validator=_validator,
-             system_message=f"# your personal briefing: \n{self.briefing}",
              **kwargs,
          )

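`rate_fine_grind` now renders one prompt per item and hands the whole batch to `aask_validate`, instead of fanning out with `asyncio.gather`: a single string yields one score dict, a list yields one dict per item, and `None` signals validation failure. A minimal usage sketch, assuming a role class composed from `GiveRating` (the `Rater` class and its fields are illustrative, not part of the package):

```python
from typing import Dict, Optional

from fabricatio.capabilities.rating import GiveRating


class Rater(GiveRating):
    """Hypothetical role mixing in the rating capability."""

    name: str = "rater"
    description: str = "Rates text against a rating manual."


async def demo(rater: Rater) -> None:
    manual: Dict[str, str] = {
        "clarity": "How easy the text is to follow.",
        "accuracy": "Whether the claims are correct.",
    }
    # Single string -> Optional[Dict[str, float]]
    single = await rater.rate_fine_grind("A draft abstract.", manual, (0.0, 1.0))
    # List of strings -> Optional[List[Dict[str, float]]], one dict per item
    batch = await rater.rate_fine_grind(["draft A", "draft B"], manual, (0.0, 1.0))
    print(single, batch)
```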
@@ -94,7 +107,7 @@ class GiveRating(WithBriefing, LLMUsage):
          criteria: Set[str],
          score_range: Tuple[float, float] = (0.0, 1.0),
          **kwargs: Unpack[ValidateKwargs],
-     ) -> Union[Dict[str, float], List[Dict[str, float]]]:
+     ) -> Optional[Dict[str, float] | List[Dict[str, float]]]:
          """Rate a given string or a sequence of strings based on a topic, criteria, and score range.

          Args:
@@ -108,16 +121,13 @@ class GiveRating(WithBriefing, LLMUsage):
              Union[Dict[str, float], List[Dict[str, float]]]: A dictionary with the ratings for each criterion if a single string is provided,
                  or a list of dictionaries with the ratings for each criterion if a sequence of strings is provided.
          """
-         manual = await self.draft_rating_manual(topic, criteria, **kwargs)
-         if isinstance(to_rate, str):
-             return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
-         if isinstance(to_rate, list):
-             return await gather(*[self.rate_fine_grind(item, manual, score_range, **kwargs) for item in to_rate])
-         raise ValueError("to_rate must be a string or a list of strings")
+         manual = await self.draft_rating_manual(topic, criteria, **kwargs) or dict(zip(criteria, criteria, strict=True))
+
+         return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)

      async def draft_rating_manual(
-         self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs]
-     ) -> Dict[str, str]:
+         self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs[Dict[str, str]]]
+     ) -> Optional[Dict[str, str]]:
          """Drafts a rating manual based on a topic and dimensions.

          Args:
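Note the new fallback above: if `draft_rating_manual` comes back `None`, each criterion is reused as its own one-line manual entry so rating can still proceed. The fallback expression in isolation:

```python
criteria = {"clarity", "accuracy"}

# What `dict(zip(criteria, criteria, strict=True))` produces when the
# drafted manual is None: each criterion becomes its own manual entry.
fallback_manual = dict(zip(criteria, criteria, strict=True))
print(fallback_manual)  # e.g. {'clarity': 'clarity', 'accuracy': 'accuracy'}
```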
@@ -149,16 +159,15 @@ class GiveRating(WithBriefing, LLMUsage):
                  )
              ),
              validator=_validator,
-             system_message=f"# your personal briefing: \n{self.briefing}",
-             **kwargs,
+             **self.prepend(kwargs),
          )

      async def draft_rating_criteria(
          self,
          topic: str,
          criteria_count: NonNegativeInt = 0,
-         **kwargs: Unpack[ValidateKwargs],
-     ) -> Set[str]:
+         **kwargs: Unpack[ValidateKwargs[Set[str]]],
+     ) -> Optional[Set[str]]:
          """Drafts rating dimensions based on a topic.

          Args:
@@ -169,16 +178,6 @@ class GiveRating(WithBriefing, LLMUsage):
          Returns:
              Set[str]: A set of rating dimensions.
          """
-
-         def _validator(response: str) -> Set[str] | None:
-             if (
-                 json_data := JsonCapture.validate_with(
-                     response, target_type=list, elements_type=str, length=criteria_count
-                 )
-             ) is not None:
-                 return set(json_data)
-             return None
-
          return await self.aask_validate(
              question=(
                  template_manager.render_template(
@@ -189,19 +188,21 @@ class GiveRating(WithBriefing, LLMUsage):
                      },
                  )
              ),
-             validator=_validator,
-             system_message=f"# your personal briefing: \n{self.briefing}",
-             **kwargs,
+             validator=lambda resp: set(out)
+             if (out := JsonCapture.validate_with(resp, list, str, criteria_count)) is not None
+             else out,
+             **self.prepend(kwargs),
          )

      async def draft_rating_criteria_from_examples(
          self,
          topic: str,
          examples: List[str],
+         m: NonNegativeInt = 0,
          reasons_count: PositiveInt = 2,
          criteria_count: PositiveInt = 5,
          **kwargs: Unpack[ValidateKwargs],
-     ) -> Set[str]:
+     ) -> Optional[Set[str]]:
          """Asynchronously drafts a set of rating criteria based on provided examples.

          This function generates rating criteria by analyzing examples and extracting reasons for comparison,
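In the hunk above, the named `_validator` helper is folded into a lambda that parses, checks, and converts in one expression via the walrus operator. The same shape, shown standalone with a stand-in for `JsonCapture.validate_with` (the `parse_str_list` helper below is hypothetical):

```python
import json
from typing import List, Optional


def parse_str_list(resp: str, length: int) -> Optional[List[str]]:
    """Stand-in for JsonCapture.validate_with(resp, list, str, length)."""
    try:
        data = json.loads(resp)
    except json.JSONDecodeError:
        return None
    ok = isinstance(data, list) and len(data) == length and all(isinstance(x, str) for x in data)
    return data if ok else None


# Bind the parse result and convert it on success; pass the None through on failure.
validator = lambda resp: set(out) if (out := parse_str_list(resp, 3)) is not None else out

assert validator('["a", "b", "c"]') == {"a", "b", "c"}
assert validator("not json") is None
```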
@@ -210,18 +211,25 @@ class GiveRating(WithBriefing, LLMUsage):
          Parameters:
              topic (str): The subject topic for the rating criteria.
              examples (List[str]): A list of example texts to analyze.
+             m (NonNegativeInt, optional): The number of examples to sample from the provided list. Defaults to 0 (no sampling).
              reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
              criteria_count (PositiveInt, optional): The final number of rating criteria to draft. Defaults to 5.
              **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for validation.

          Returns:
              Set[str]: A set of drafted rating criteria.
+
+         Warnings:
+             Since this function uses pairwise comparisons, it may not be suitable for large lists of examples.
+             For that reason, consider using a smaller list of examples or setting `m` to a non-zero value smaller than the length of the examples.
          """
-         kwargs = GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
+         if m:
+             examples = sample(examples, m)
+
          # extract reasons from the comparison of ordered pairs of extracted from examples
          reasons = flatten(
-             await self.aask_validate_batch(
-                 questions=[
+             await self.aask_validate(
+                 question=[
                      template_manager.render_template(
                          configs.templates.extract_reasons_from_examples_template,
                          {
@@ -236,7 +244,7 @@ class GiveRating(WithBriefing, LLMUsage):
                  validator=lambda resp: JsonCapture.validate_with(
                      resp, target_type=list, elements_type=str, length=reasons_count
                  ),
-                 **kwargs,
+                 **self.prepend(kwargs),
              )
          )
          # extract certain mount of criteria from reasons according to their importance and frequency
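The new `Warnings` note is worth quantifying: with reasons drawn from ordered pairs of examples (the module imports `itertools.permutations`, so n examples presumably yield n·(n−1) comparison prompts), pre-sampling `m` examples caps the cost. A quick check of the arithmetic:

```python
from itertools import permutations
from random import sample

examples = [f"example {i}" for i in range(20)]

# Ordered pairs grow quadratically: n * (n - 1) prompts for n examples.
print(len(list(permutations(examples, 2))))  # 380

# Pre-sampling m examples, as a non-zero `m` now does, caps this at m * (m - 1).
m = 5
print(len(list(permutations(sample(examples, m), 2))))  # 20
```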
@@ -261,7 +269,7 @@ class GiveRating(WithBriefing, LLMUsage):
          self,
          topic: str,
          criteria: Set[str],
-         **kwargs: Unpack[ValidateKwargs],
+         **kwargs: Unpack[ValidateKwargs[float]],
      ) -> Dict[str, float]:
          """Drafts rating weights for a given topic and criteria using the Klee method.

@@ -276,12 +284,12 @@ class GiveRating(WithBriefing, LLMUsage):
          if len(criteria) < 2:  # noqa: PLR2004
              raise ValueError("At least two criteria are required to draft rating weights")

-         criteria = list(criteria)  # freeze the order
-         windows = windowed(criteria, 2)
+         criteria_seq = list(criteria)  # freeze the order
+         windows = windowed(criteria_seq, 2)

          # get the importance multiplier indicating how important is second criterion compared to the first one
-         relative_weights = await self.aask_validate_batch(
-             questions=[
+         relative_weights = await self.aask_validate(
+             question=[
                  template_manager.render_template(
                      configs.templates.draft_rating_weights_klee_template,
                      {
@@ -293,13 +301,13 @@ class GiveRating(WithBriefing, LLMUsage):
                  for pair in windows
              ],
              validator=lambda resp: JsonCapture.validate_with(resp, target_type=float),
-             **GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs),
+             **self.prepend(kwargs),
          )
          weights = [1]
          for rw in relative_weights:
              weights.append(weights[-1] * rw)
          total = sum(weights)
-         return dict(zip(criteria, [w / total for w in weights], strict=True))
+         return dict(zip(criteria_seq, [w / total for w in weights], strict=True))

      async def composite_score(
          self,
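The Klee chaining above is easy to follow with concrete numbers: each elicited multiplier says how important a criterion is relative to its predecessor, the chain of products fixes the raw weights, and dividing by their sum normalizes them to 1. A worked example mirroring the diff (criteria names are illustrative):

```python
criteria_seq = ["clarity", "accuracy", "novelty"]
relative_weights = [2.0, 0.5]  # accuracy vs. clarity, novelty vs. accuracy

weights = [1.0]
for rw in relative_weights:
    weights.append(weights[-1] * rw)  # raw chain: [1.0, 2.0, 1.0]

total = sum(weights)  # 4.0
print(dict(zip(criteria_seq, [w / total for w in weights], strict=True)))
# {'clarity': 0.25, 'accuracy': 0.5, 'novelty': 0.25}
```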
fabricatio/capabilities/review.py ADDED
@@ -0,0 +1,241 @@
+ """A module that provides functionality to rate tasks based on a rating manual and score range."""
+
+ from typing import List, Optional, Self, Set, Unpack, cast
+
+ from fabricatio import template_manager
+ from fabricatio.capabilities.propose import Propose
+ from fabricatio.capabilities.rating import GiveRating
+ from fabricatio.config import configs
+ from fabricatio.models.generic import Base, Display, ProposedAble, WithBriefing
+ from fabricatio.models.kwargs_types import ReviewKwargs, ValidateKwargs
+ from fabricatio.models.task import Task
+ from questionary import Choice, checkbox
+ from rich import print
+
+
+ class ProblemSolutions(Base):
+     """Represents a problem-solution pair identified during a review process.
+
+     This class encapsulates a single problem with its corresponding potential solutions,
+     providing a structured way to manage review findings.
+
+     Attributes:
+         problem (str): The problem statement identified during review.
+         solutions (List[str]): A collection of potential solutions to address the problem.
+     """
+
+     problem: str
+     """The problem identified in the review."""
+     solutions: List[str]
+     """A collection of potential solutions to address the problem."""
+
+     def update_problem(self, problem: str) -> Self:
+         """Update the problem description.
+
+         Args:
+             problem (str): The new problem description to replace the current one.
+
+         Returns:
+             Self: The current instance with updated problem description.
+         """
+         self.problem = problem
+         return self
+
+     def update_solutions(self, solutions: List[str]) -> Self:
+         """Update the list of potential solutions.
+
+         Args:
+             solutions (List[str]): The new collection of solutions to replace the current ones.
+
+         Returns:
+             Self: The current instance with updated solutions.
+         """
+         self.solutions = solutions
+         return self
+
+
+ class ReviewResult[T](ProposedAble, Display):
+     """Represents the outcome of a review process with identified problems and solutions.
+
+     This class maintains a structured collection of problems found during a review,
+     their proposed solutions, and a reference to the original reviewed object.
+
+     Attributes:
+         review_topic (str): The subject or focus area of the review.
+         problem_solutions (List[ProblemSolutions]): Collection of problems identified
+             during review along with their potential solutions.
+
+     Type Parameters:
+         T: The type of the object being reviewed.
+     """
+
+     review_topic: str
+     """The subject or focus area of the review."""
+
+     problem_solutions: List[ProblemSolutions]
+     """Collection of problems identified during review along with their potential solutions."""
+
+     _ref: T
+     """Reference to the original object that was reviewed."""
+
+     def update_topic(self, topic: str) -> Self:
+         """Update the review topic.
+
+         Args:
+             topic (str): The new topic to be associated with this review.
+
+         Returns:
+             Self: The current instance with updated review topic.
+         """
+         self.review_topic = topic
+         return self
+
+     def update_ref[K](self, ref: K) -> "ReviewResult[K]":
+         """Update the reference to the reviewed object.
+
+         Args:
+             ref (K): The new reference object to be associated with this review.
+
+         Returns:
+             ReviewResult[K]: The current instance with updated reference type.
+         """
+         self._ref = ref  # pyright: ignore [reportAttributeAccessIssue]
+         return cast(ReviewResult[K], self)
+
+     def deref(self) -> T:
+         """Retrieve the referenced object that was reviewed.
+
+         Returns:
+             T: The original object that was reviewed.
+         """
+         return self._ref
+
+     async def supervisor_check(self, check_solutions: bool = False) -> Self:
+         """Perform an interactive review session to filter problems and solutions.
+
+         Presents an interactive prompt allowing a supervisor to select which
+         problems (and optionally solutions) should be retained in the final review.
+
+         Args:
+             check_solutions (bool, optional): When True, also prompts for filtering
+                 individual solutions for each retained problem. Defaults to False.
+
+         Returns:
+             Self: The current instance with filtered problems and solutions.
+         """
+         if isinstance(self._ref, str):
+             display = self._ref
+         elif isinstance(self._ref, WithBriefing):
+             display = self._ref.briefing
+         elif isinstance(self._ref, Display):
+             display = self._ref.display()
+         else:
+             raise TypeError(f"Unsupported type for review: {type(self._ref)}")
+         # Choose the problems to retain
+         print(display)
+         chosen_ones: List[ProblemSolutions] = await checkbox(
+             f"Please choose the problems you want to retain.(Default: retain all)\n\t`{self.review_topic}`",
+             choices=[Choice(p.problem, p, checked=True) for p in self.problem_solutions],
+         ).ask_async()
+         if not check_solutions:
+             self.problem_solutions = chosen_ones
+             return self
+
+         # Choose the solutions to retain
+         for to_exam in chosen_ones:
+             to_exam.update_solutions(
+                 await checkbox(
+                     f"Please choose the solutions you want to retain.(Default: retain all)\n\t`{to_exam.problem}`",
+                     choices=[Choice(s, s, checked=True) for s in to_exam.solutions],
+                 ).ask_async()
+             )
+         return self
+
+
+ class Review(GiveRating, Propose):
+     """Class that provides functionality to review tasks and strings using a language model.
+
+     This class extends GiveRating and Propose capabilities to analyze content,
+     identify problems, and suggest solutions based on specified criteria.
+
+     The review process can be applied to Task objects or plain strings with
+     appropriate topic and criteria.
+     """
+
+     async def review_task[T](self, task: Task[T], **kwargs: Unpack[ReviewKwargs]) -> ReviewResult[Task[T]]:
+         """Review a task using specified review criteria.
+
+         This method analyzes a task object to identify problems and propose solutions
+         based on the criteria provided in kwargs.
+
+         Args:
+             task (Task[T]): The task object to be reviewed.
+             **kwargs (Unpack[ReviewKwargs]): Additional keyword arguments for the review process,
+                 including topic and optional criteria.
+
+         Returns:
+             ReviewResult[Task[T]]: A review result containing identified problems and proposed solutions,
+                 with a reference to the original task.
+         """
+         return cast(ReviewResult[Task[T]], await self.review_obj(task, **kwargs))
+
+     async def review_string(
+         self,
+         text: str,
+         topic: str,
+         criteria: Optional[Set[str]] = None,
+         **kwargs: Unpack[ValidateKwargs[ReviewResult[str]]],
+     ) -> ReviewResult[str]:
+         """Review a string based on specified topic and criteria.
+
+         This method analyzes a text string to identify problems and propose solutions
+         based on the given topic and criteria.
+
+         Args:
+             text (str): The text content to be reviewed.
+             topic (str): The subject topic for the review criteria.
+             criteria (Optional[Set[str]], optional): A set of criteria for the review.
+                 If not provided, criteria will be drafted automatically. Defaults to None.
+             **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
+
+         Returns:
+             ReviewResult[str]: A review result containing identified problems and proposed solutions,
+                 with a reference to the original text.
+         """
+         criteria = criteria or (await self.draft_rating_criteria(topic))
+         manual = await self.draft_rating_manual(topic, criteria)
+         res: ReviewResult[str] = await self.propose(
+             ReviewResult,
+             template_manager.render_template(
+                 configs.templates.review_string_template, {"text": text, "topic": topic, "criteria_manual": manual}
+             ),
+             **kwargs,
+         )
+         return res.update_ref(text).update_topic(topic)
+
+     async def review_obj[M: (Display, WithBriefing)](self, obj: M, **kwargs: Unpack[ReviewKwargs]) -> ReviewResult[M]:
+         """Review an object that implements Display or WithBriefing interface.
+
+         This method extracts displayable text from the object and performs a review
+         based on the criteria provided in kwargs.
+
+         Args:
+             obj (M): The object to be reviewed, which must implement either Display or WithBriefing.
+             **kwargs (Unpack[ReviewKwargs]): Additional keyword arguments for the review process,
+                 including topic and optional criteria.
+
+         Raises:
+             TypeError: If the object does not implement Display or WithBriefing.
+
+         Returns:
+             ReviewResult[M]: A review result containing identified problems and proposed solutions,
+                 with a reference to the original object.
+         """
+         if isinstance(obj, Display):
+             text = obj.display()
+         elif isinstance(obj, WithBriefing):
+             text = obj.briefing
+         else:
+             raise TypeError(f"Unsupported type for review: {type(obj)}")
+
+         return (await self.review_string(text, **kwargs)).update_ref(obj)
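End to end, the new capability drafts criteria and a manual, asks the LLM to propose a `ReviewResult`, then lets a human filter the findings. A usage sketch, assuming a role class composed from `Review` (the `Reviewer` class and prompt text are illustrative; `ReviewKwargs` carries at least the review topic):

```python
from fabricatio.capabilities.review import Review


class Reviewer(Review):
    """Hypothetical role mixing in the review capability."""

    name: str = "reviewer"
    description: str = "Reviews drafts for tone and rigor."


async def demo(reviewer: Reviewer) -> None:
    result = await reviewer.review_string(
        "Obviously, the results prove our method is the best.",
        topic="academic tone",
    )
    # Interactive questionary checkboxes: keep/drop problems, then solutions.
    result = await result.supervisor_check(check_solutions=True)
    for ps in result.problem_solutions:
        print(ps.problem, ps.solutions)
    print(result.deref())  # the original reviewed text
```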
fabricatio/capabilities/task.py CHANGED
@@ -1,7 +1,7 @@
  """A module for the task capabilities of the Fabricatio library."""

  from types import CodeType
- from typing import Any, Dict, List, Optional, Tuple, Unpack
+ from typing import Any, Dict, List, Optional, Tuple, Unpack, cast

  import orjson
  from fabricatio._rust_instances import template_manager
@@ -22,7 +22,7 @@ class ProposeTask(WithBriefing, Propose):
      async def propose_task[T](
          self,
          prompt: str,
-         **kwargs: Unpack[ValidateKwargs],
+         **kwargs: Unpack[ValidateKwargs[Task[T]]],
      ) -> Task[T]:
          """Asynchronously proposes a task based on a given prompt and parameters.

@@ -37,7 +37,7 @@
              logger.error(err := f"{self.name}: Prompt must be provided.")
              raise ValueError(err)

-         return await self.propose(Task, prompt, system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
+         return await self.propose(Task, prompt, **self.prepend(cast(Dict[str, Any], kwargs)))


  class HandleTask(WithBriefing, ToolBoxUsage):
@@ -49,7 +49,7 @@ class HandleTask(WithBriefing, ToolBoxUsage):
          tools: List[Tool],
          data: Dict[str, Any],
          **kwargs: Unpack[ValidateKwargs],
-     ) -> Tuple[CodeType, List[str]]:
+     ) -> Optional[Tuple[CodeType, List[str]]]:
          """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
          logger.info(f"Drafting tool usage code for task: {task.briefing}")

@@ -81,8 +81,7 @@ class HandleTask(WithBriefing, ToolBoxUsage):
          return await self.aask_validate(
              question=q,
              validator=_validator,
-             system_message=f"# your personal briefing: \n{self.briefing}",
-             **kwargs,
+             **self.prepend(cast(Dict[str, Any], kwargs)),
          )

      async def handle_fin_grind(
@@ -99,10 +98,10 @@ class HandleTask(WithBriefing, ToolBoxUsage):
          tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
          logger.info(f"{self.name} have gathered {[t.name for t in tools]}")

-         if tools:
+         if tools and (pack := await self.draft_tool_usage_code(task, tools, data, **kwargs)):
              executor = ToolExecutor(candidates=tools, data=data)
-             code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)

+             code, to_extract = pack
              cxt = executor.execute(code)
              if to_extract:
                  return tuple(cxt.get(k) for k in to_extract)
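Since `draft_tool_usage_code` can now return `None`, the call moved into the `if` with a walrus binding so a failed draft falls through to the no-op path instead of blowing up at tuple unpacking. The pattern in isolation:

```python
from typing import List, Optional, Tuple


def maybe_draft() -> Optional[Tuple[str, List[str]]]:
    """Stand-in for a drafting call that may fail and return None."""
    return ("compiled code", ["result"])


# Bind and test in one condition: a None result skips the block entirely,
# so the unpack below can never see None.
if pack := maybe_draft():
    code, to_extract = pack
    print(code, to_extract)
```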
fabricatio/config.py CHANGED
@@ -1,8 +1,10 @@
  """Configuration module for the Fabricatio application."""

+ from pathlib import Path
  from typing import List, Literal, Optional

  from appdirs import user_config_dir
+ from litellm.types.caching import LiteLLMCacheType
  from pydantic import (
      BaseModel,
      ConfigDict,
@@ -25,6 +27,8 @@ from pydantic_settings import (
      TomlConfigSettingsSource,
  )

+ from fabricatio.models.kwargs_types import CacheKwargs
+
  ROAMING_DIR = user_config_dir("fabricatio", "", roaming=True)


@@ -64,7 +68,7 @@ class LLMConfig(BaseModel):
      temperature: NonNegativeFloat = Field(default=1.0)
      """The temperature of the LLM model. Controls randomness in generation. Set to 1.0 as per request."""

-     stop_sign: str | List[str] = Field(default=("\n\n\n", "User:"))
+     stop_sign: str | List[str] = Field(default_factory=lambda :["\n\n\n", "User:"])
      """The stop sign of the LLM model. No default stop sign specified."""

      top_p: NonNegativeFloat = Field(default=0.35)
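The old `stop_sign` default was a tuple, which never matched the declared `str | List[str]` type; the `default_factory` now builds a genuine list, and as a factory it also gives every instance its own copy. A minimal sketch of the fix:

```python
from typing import List

from pydantic import BaseModel, Field


class LLMDemo(BaseModel):
    # A factory returns a real list matching `str | List[str]`; the old
    # tuple default did not satisfy the annotation.
    stop_sign: str | List[str] = Field(default_factory=lambda: ["\n\n\n", "User:"])


print(type(LLMDemo().stop_sign))  # <class 'list'>
```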
@@ -140,7 +144,7 @@ class DebugConfig(BaseModel):
      log_level: Literal["DEBUG", "INFO", "SUCCESS", "WARNING", "ERROR", "CRITICAL"] = Field(default="INFO")
      """The log level of the application."""

-     log_file: FilePath = Field(default=rf"{ROAMING_DIR}\fabricatio.log")
+     log_file: FilePath = Field(default=Path(rf"{ROAMING_DIR}\fabricatio.log"))
      """The log file of the application."""

      rotation: int = Field(default=1)
@@ -158,7 +162,7 @@ class TemplateConfig(BaseModel):

      model_config = ConfigDict(use_attribute_docstrings=True)
      template_dir: List[DirectoryPath] = Field(
-         default_factory=lambda: [DirectoryPath(r".\templates"), DirectoryPath(rf"{ROAMING_DIR}\templates")]
+         default_factory=lambda: [Path(r".\templates"), Path(rf"{ROAMING_DIR}\templates")]
      )
      """The directory containing the templates."""
      active_loading: bool = Field(default=False)
@@ -209,6 +213,15 @@ class TemplateConfig(BaseModel):
      liststr_template: str = Field(default="liststr")
      """The name of the liststr template which will be used to display a list of strings."""

+     refined_query_template: str = Field(default="refined_query")
+     """The name of the refined query template which will be used to refine a query."""
+
+     pathstr_template: str = Field(default="pathstr")
+     """The name of the pathstr template which will be used to acquire a path of strings."""
+
+     review_string_template: str = Field(default="review_string")
+     """The name of the review string template which will be used to review a string."""
+

  class MagikaConfig(BaseModel):
      """Magika configuration class."""
@@ -222,12 +235,15 @@ class GeneralConfig(BaseModel):
      """Global configuration class."""

      model_config = ConfigDict(use_attribute_docstrings=True)
-     workspace: DirectoryPath = Field(default=DirectoryPath(r"."))
+     workspace: DirectoryPath = Field(default=Path())
      """The workspace directory for the application."""

      confirm_on_ops: bool = Field(default=True)
      """Whether to confirm on operations."""

+     use_json_repair: bool = Field(default=True)
+     """Whether to use JSON repair."""
+

  class ToolBoxConfig(BaseModel):
      """Toolbox configuration class."""
@@ -256,6 +272,19 @@ class RagConfig(BaseModel):
      """The dimensions of the Milvus server."""


+ class CacheConfig(BaseModel):
+     """cache configuration class, uses litellm as cache backend. more info see https://docs.litellm.ai/docs/caching/all_caches."""
+
+     model_config = ConfigDict(use_attribute_docstrings=True)
+
+     type: Optional[LiteLLMCacheType] = None
+     """The type of cache to use. If None, the default cache type will be used."""
+     params: CacheKwargs = Field(default_factory=CacheKwargs)
+     """The parameters for the cache. If type is None, the default parameters will be used."""
+     enabled: bool = Field(default=False)
+     """Whether to enable cache."""
+
+
  class Settings(BaseSettings):
      """Application settings class.

@@ -305,6 +334,9 @@ class Settings(BaseSettings):
      rag: RagConfig = Field(default_factory=RagConfig)
      """RAG Configuration"""

+     cache: CacheConfig = Field(default_factory=CacheConfig)
+     """Cache Configuration"""
+

      @classmethod
      def settings_customise_sources(
fabricatio/fs/__init__.py CHANGED
@@ -1,14 +1,26 @@
  """FileSystem manipulation module for Fabricatio."""

- from fabricatio.fs.curd import copy_file, create_directory, delete_directory, delete_file, dump_text, move_file, tree
+ from fabricatio.fs.curd import (
+     absolute_path,
+     copy_file,
+     create_directory,
+     delete_directory,
+     delete_file,
+     dump_text,
+     gather_files,
+     move_file,
+     tree,
+ )
  from fabricatio.fs.readers import magika, safe_json_read, safe_text_read

  __all__ = [
+     "absolute_path",
      "copy_file",
      "create_directory",
      "delete_directory",
      "delete_file",
      "dump_text",
+     "gather_files",
      "magika",
      "move_file",
      "safe_json_read",