fabricatio 0.2.1.dev3__tar.gz → 0.2.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/PKG-INFO +2 -2
  2. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/make_a_rating/rating.py +56 -12
  3. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/pyproject.toml +2 -2
  4. fabricatio-0.2.2/python/fabricatio/capabilities/rating.py +355 -0
  5. fabricatio-0.2.2/python/fabricatio/capabilities/task.py +131 -0
  6. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/config.py +11 -2
  7. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/action.py +2 -1
  8. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/kwargs_types.py +7 -2
  9. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/role.py +2 -1
  10. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/task.py +5 -5
  11. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/usages.py +41 -45
  12. fabricatio-0.2.1.dev3/templates/built-in/draft_rating_dimensions.hbs → fabricatio-0.2.2/templates/built-in/draft_rating_criteria.hbs +3 -3
  13. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/draft_rating_manual.hbs +5 -4
  14. fabricatio-0.2.2/templates/built-in/draft_rating_weights_klee.hbs +23 -0
  15. fabricatio-0.2.2/templates/built-in/extract_criteria_from_reasons.hbs +24 -0
  16. fabricatio-0.2.2/templates/built-in/extract_reasons_from_examples.hbs +18 -0
  17. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/propose_task.hbs +3 -2
  18. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/rate_fine_grind.hbs +4 -4
  19. fabricatio-0.2.2/templates.tar.gz +0 -0
  20. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_advanced.py +6 -3
  21. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/uv.lock +12 -21
  22. fabricatio-0.2.1.dev3/python/fabricatio/models/advanced.py +0 -289
  23. fabricatio-0.2.1.dev3/templates.tar.gz +0 -0
  24. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/.github/workflows/build-package.yaml +0 -0
  25. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/.github/workflows/ruff.yaml +0 -0
  26. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/.github/workflows/tests.yaml +0 -0
  27. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/.gitignore +0 -0
  28. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/.python-version +0 -0
  29. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/Cargo.lock +0 -0
  30. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/Cargo.toml +0 -0
  31. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/LICENSE +0 -0
  32. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/Makefile +0 -0
  33. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/README.md +0 -0
  34. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/llm_usages/llm_usage.py +0 -0
  35. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/make_diary/commits.json +0 -0
  36. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/make_diary/diary.py +0 -0
  37. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/minor/hello_fabricatio.py +0 -0
  38. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/propose_task/propose.py +0 -0
  39. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/simple_chat/chat.py +0 -0
  40. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/examples/task_handle/handle_task.py +0 -0
  41. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/__init__.py +0 -0
  42. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/_rust.pyi +0 -0
  43. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/_rust_instances.py +0 -0
  44. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/actions/__init__.py +0 -0
  45. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/actions/communication.py +0 -0
  46. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/actions/transmission.py +0 -0
  47. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/core.py +0 -0
  48. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/decorators.py +0 -0
  49. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/fs/__init__.py +0 -0
  50. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/fs/curd.py +0 -0
  51. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/fs/readers.py +0 -0
  52. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/journal.py +0 -0
  53. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/events.py +0 -0
  54. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/generic.py +0 -0
  55. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/tool.py +0 -0
  56. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/models/utils.py +0 -0
  57. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/parser.py +0 -0
  58. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/py.typed +0 -0
  59. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/toolboxes/__init__.py +0 -0
  60. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/toolboxes/arithmetic.py +0 -0
  61. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/toolboxes/fs.py +0 -0
  62. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/python/fabricatio/toolboxes/task.py +0 -0
  63. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/src/hash.rs +0 -0
  64. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/src/lib.rs +0 -0
  65. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/src/templates.rs +0 -0
  66. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/binary-exploitation-ctf-solver.hbs +0 -0
  67. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/claude-xml.hbs +0 -0
  68. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/clean-up-code.hbs +0 -0
  69. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/cryptography-ctf-solver.hbs +0 -0
  70. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/dependencies.hbs +0 -0
  71. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/document-the-code.hbs +0 -0
  72. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/draft_tool_usage_code.hbs +0 -0
  73. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/find-security-vulnerabilities.hbs +0 -0
  74. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/fix-bugs.hbs +0 -0
  75. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/improve-performance.hbs +0 -0
  76. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/make_choice.hbs +0 -0
  77. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/make_judgment.hbs +0 -0
  78. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/refactor.hbs +0 -0
  79. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/reverse-engineering-ctf-solver.hbs +0 -0
  80. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/task_briefing.hbs +0 -0
  81. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/web-ctf-solver.hbs +0 -0
  82. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/write-git-commit.hbs +0 -0
  83. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/write-github-pull-request.hbs +0 -0
  84. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/templates/built-in/write-github-readme.hbs +0 -0
  85. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_config.py +0 -0
  86. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_action.py +0 -0
  87. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_generic.py +0 -0
  88. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_role.py +0 -0
  89. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_task.py +0 -0
  90. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_tool.py +0 -0
  91. {fabricatio-0.2.1.dev3 → fabricatio-0.2.2}/tests/test_models/test_usages.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: fabricatio
3
- Version: 0.2.1.dev3
3
+ Version: 0.2.2
4
4
  Classifier: License :: OSI Approved :: MIT License
5
5
  Classifier: Programming Language :: Rust
6
6
  Classifier: Programming Language :: Python :: 3.12
@@ -15,6 +15,7 @@ Requires-Dist: gitpython>=3.1.44
15
15
  Requires-Dist: litellm>=1.60.0
16
16
  Requires-Dist: loguru>=0.7.3
17
17
  Requires-Dist: magika>=0.5.1
18
+ Requires-Dist: more-itertools>=10.6.0
18
19
  Requires-Dist: orjson>=3.10.15
19
20
  Requires-Dist: pydantic>=2.10.6
20
21
  Requires-Dist: pydantic-settings>=2.7.1
@@ -22,7 +23,6 @@ Requires-Dist: pymitter>=1.0.0
22
23
  Requires-Dist: questionary>=2.1.0
23
24
  Requires-Dist: regex>=2024.11.6
24
25
  Requires-Dist: rich>=13.9.4
25
- Requires-Dist: faiss-cpu>=1.10.0 ; extra == 'rag'
26
26
  Requires-Dist: pymilvus>=2.5.4 ; extra == 'rag'
27
27
  Requires-Dist: fabricatio[rag] ; extra == 'full'
28
28
  Provides-Extra: rag
@@ -15,17 +15,13 @@ class Rate(Action):
15
15
  name: str = "rate"
16
16
  output_key: str = "task_output"
17
17
 
18
- async def _execute(self, to_rate: List[str], rate_topic: str, dimensions: Set[str], **_) -> [Dict[str, float]]:
18
+ async def _execute(self, to_rate: List[str], rate_topic: str, criteria: Set[str], **_) -> List[Dict[str, float]]:
19
+ logger.info(f"Rating the: \n{to_rate}")
19
20
  """Rate the task."""
20
- return await asyncio.gather(
21
- *[
22
- self.rate(
23
- target,
24
- rate_topic,
25
- dimensions,
26
- )
27
- for target in to_rate
28
- ]
21
+ return await self.rate(
22
+ to_rate,
23
+ rate_topic,
24
+ criteria,
29
25
  )
30
26
 
31
27
 
@@ -56,6 +52,32 @@ class WhatToRate(Action):
56
52
  )
57
53
 
58
54
 
55
+ class MakeCriteria(Action):
56
+ """Make criteria for rating."""
57
+
58
+ name: str = "make criteria"
59
+ output_key: str = "criteria"
60
+
61
+ async def _execute(self, rate_topic: str, to_rate: List[str], **cxt: Unpack) -> Set[str]:
62
+ criteria = await self.draft_rating_criteria_from_examples(rate_topic, to_rate)
63
+ logger.info(f"Criteria: \n{criteria}")
64
+ return set(criteria)
65
+
66
+
67
+ class MakeCompositeScore(Action):
68
+ """Make a composite score."""
69
+
70
+ name: str = "make composite score"
71
+
72
+ output_key: str = "task_output"
73
+
74
+ async def _execute(self, rate_topic: str, to_rate: List[str], **cxt: Unpack) -> List[float]:
75
+ return await self.composite_score(
76
+ rate_topic,
77
+ to_rate,
78
+ )
79
+
80
+
59
81
  async def main() -> None:
60
82
  """Main function."""
61
83
  role = Role(
@@ -67,18 +89,40 @@ async def main() -> None:
67
89
  steps=(WhatToRate, Rate),
68
90
  extra_init_context={
69
91
  "rate_topic": "If this food is cheap and delicious",
70
- "dimensions": {"taste", "price", "quality", "safety", "healthiness", "freshness"},
92
+ "criteria": {"taste", "price", "quality", "safety", "healthiness"},
93
+ },
94
+ ),
95
+ Event.instantiate_from("make_criteria_for_food").push_wildcard().push("pending"): WorkFlow(
96
+ name="Make criteria for food",
97
+ steps=(WhatToRate, MakeCriteria, Rate),
98
+ extra_init_context={
99
+ "rate_topic": "if the food is 'good'",
100
+ },
101
+ ),
102
+ Event.instantiate_from("make_composite_score").push_wildcard().push("pending"): WorkFlow(
103
+ name="Make composite score",
104
+ steps=(WhatToRate, MakeCompositeScore),
105
+ extra_init_context={
106
+ "rate_topic": "if the food is 'good'",
71
107
  },
72
108
  ),
73
109
  },
74
110
  )
75
111
  task = await role.propose(
76
- "rate for rotten apple, ripen banana, fresh orange, giga-burger, smelly pizza with flies on it, and a boiling instant coffee",
112
+ "rate these food, so that i can decide what to eat today. choco cake, strawberry icecream, giga burger, cup of coffee, rotten bread from the trash bin, and a salty of fruit salad",
77
113
  )
78
114
  rating = await task.move_to("rate_food").delegate()
79
115
 
80
116
  logger.success(f"Result: \n{rating}")
81
117
 
118
+ generated_criteria = await task.move_to("make_criteria_for_food").delegate()
119
+
120
+ logger.success(f"Generated Criteria: \n{generated_criteria}")
121
+
122
+ composite_score = await task.move_to("make_composite_score").delegate()
123
+
124
+ logger.success(f"Composite Score: \n{composite_score}")
125
+
82
126
 
83
127
  if __name__ == "__main__":
84
128
  asyncio.run(main())
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "fabricatio"
3
- version = "0.2.1-dev3"
3
+ version = "0.2.2"
4
4
  description = "A LLM multi-agent framework."
5
5
  readme = "README.md"
6
6
  license = { file = "LICENSE" }
@@ -34,6 +34,7 @@ dependencies = [
34
34
  "litellm>=1.60.0",
35
35
  "loguru>=0.7.3",
36
36
  "magika>=0.5.1",
37
+ "more-itertools>=10.6.0",
37
38
  "orjson>=3.10.15",
38
39
  "pydantic>=2.10.6",
39
40
  "pydantic-settings>=2.7.1",
@@ -76,7 +77,6 @@ dev = [
76
77
 
77
78
  [project.optional-dependencies]
78
79
  rag = [
79
- "faiss-cpu>=1.10.0",
80
80
  "pymilvus>=2.5.4",
81
81
  ]
82
82
 
@@ -0,0 +1,355 @@
1
+ """A module that provides functionality to rate tasks based on a rating manual and score range."""
2
+
3
+ from asyncio import gather
4
+ from itertools import permutations
5
+ from typing import Dict, List, Set, Tuple, Union, Unpack, overload
6
+
7
+ import orjson
8
+ from fabricatio._rust_instances import template_manager
9
+ from fabricatio.config import configs
10
+ from fabricatio.journal import logger
11
+ from fabricatio.models.generic import WithBriefing
12
+ from fabricatio.models.kwargs_types import GenerateKwargs, ValidateKwargs
13
+ from fabricatio.models.usages import LLMUsage
14
+ from fabricatio.parser import JsonCapture
15
+ from more_itertools import flatten, windowed
16
+ from pydantic import NonNegativeInt, PositiveInt
17
+
18
+
19
+ class GiveRating(WithBriefing, LLMUsage):
20
+ """A class that provides functionality to rate tasks based on a rating manual and score range."""
21
+
22
+ async def rate_fine_grind(
23
+ self,
24
+ to_rate: str,
25
+ rating_manual: Dict[str, str],
26
+ score_range: Tuple[float, float],
27
+ **kwargs: Unpack[ValidateKwargs],
28
+ ) -> Dict[str, float]:
29
+ """Rate a given string based on a rating manual and score range.
30
+
31
+ Args:
32
+ to_rate (str): The string to be rated.
33
+ rating_manual (Dict[str, str]): A dictionary containing the rating criteria.
34
+ score_range (Tuple[float, float]): A tuple representing the valid score range.
35
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
36
+
37
+ Returns:
38
+ Dict[str, float]: A dictionary with the ratings for each dimension.
39
+ """
40
+
41
+ def _validator(response: str) -> Dict[str, float] | None:
42
+ if (
43
+ (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
44
+ and isinstance(json_data, dict)
45
+ and json_data.keys() == rating_manual.keys()
46
+ and all(isinstance(v, float) for v in json_data.values())
47
+ and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
48
+ ):
49
+ return json_data
50
+ return None
51
+
52
+ logger.info(f"Rating for {to_rate}")
53
+ return await self.aask_validate(
54
+ question=(
55
+ template_manager.render_template(
56
+ configs.templates.rate_fine_grind_template,
57
+ {
58
+ "to_rate": to_rate,
59
+ "min_score": score_range[0],
60
+ "max_score": score_range[1],
61
+ "rating_manual": rating_manual,
62
+ },
63
+ )
64
+ ),
65
+ validator=_validator,
66
+ system_message=f"# your personal briefing: \n{self.briefing}",
67
+ **kwargs,
68
+ )
69
+
70
+ @overload
71
+ async def rate(
72
+ self,
73
+ to_rate: str,
74
+ topic: str,
75
+ criteria: Set[str],
76
+ score_range: Tuple[float, float] = (0.0, 1.0),
77
+ **kwargs: Unpack[ValidateKwargs],
78
+ ) -> Dict[str, float]: ...
79
+
80
+ @overload
81
+ async def rate(
82
+ self,
83
+ to_rate: List[str],
84
+ topic: str,
85
+ criteria: Set[str],
86
+ score_range: Tuple[float, float] = (0.0, 1.0),
87
+ **kwargs: Unpack[ValidateKwargs],
88
+ ) -> List[Dict[str, float]]: ...
89
+
90
+ async def rate(
91
+ self,
92
+ to_rate: Union[str, List[str]],
93
+ topic: str,
94
+ criteria: Set[str],
95
+ score_range: Tuple[float, float] = (0.0, 1.0),
96
+ **kwargs: Unpack[ValidateKwargs],
97
+ ) -> Union[Dict[str, float], List[Dict[str, float]]]:
98
+ """Rate a given string or a sequence of strings based on a topic, criteria, and score range.
99
+
100
+ Args:
101
+ to_rate (Union[str, List[str]]): The string or sequence of strings to be rated.
102
+ topic (str): The topic related to the task.
103
+ criteria (Set[str]): A set of criteria for rating.
104
+ score_range (Tuple[float, float], optional): A tuple representing the valid score range. Defaults to (0.0, 1.0).
105
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
106
+
107
+ Returns:
108
+ Union[Dict[str, float], List[Dict[str, float]]]: A dictionary with the ratings for each criterion if a single string is provided,
109
+ or a list of dictionaries with the ratings for each criterion if a sequence of strings is provided.
110
+ """
111
+ manual = await self.draft_rating_manual(topic, criteria, **kwargs)
112
+ if isinstance(to_rate, str):
113
+ return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
114
+ if isinstance(to_rate, list):
115
+ return await gather(*[self.rate_fine_grind(item, manual, score_range, **kwargs) for item in to_rate])
116
+ raise ValueError("to_rate must be a string or a list of strings")
117
+
118
+ async def draft_rating_manual(
119
+ self, topic: str, criteria: Set[str], **kwargs: Unpack[ValidateKwargs]
120
+ ) -> Dict[str, str]:
121
+ """Drafts a rating manual based on a topic and dimensions.
122
+
123
+ Args:
124
+ topic (str): The topic for the rating manual.
125
+ criteria (Set[str]): A set of dimensions for the rating manual.
126
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
127
+
128
+ Returns:
129
+ Dict[str, str]: A dictionary representing the drafted rating manual.
130
+ """
131
+
132
+ def _validator(response: str) -> Dict[str, str] | None:
133
+ if (
134
+ (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
135
+ and isinstance(json_data, dict)
136
+ and json_data.keys() == criteria
137
+ and all(isinstance(v, str) for v in json_data.values())
138
+ ):
139
+ return json_data
140
+ return None
141
+
142
+ return await self.aask_validate(
143
+ question=(
144
+ template_manager.render_template(
145
+ configs.templates.draft_rating_manual_template,
146
+ {
147
+ "topic": topic,
148
+ "criteria": criteria,
149
+ },
150
+ )
151
+ ),
152
+ validator=_validator,
153
+ system_message=f"# your personal briefing: \n{self.briefing}",
154
+ **kwargs,
155
+ )
156
+
157
+ async def draft_rating_criteria(
158
+ self,
159
+ topic: str,
160
+ criteria_count: NonNegativeInt = 0,
161
+ **kwargs: Unpack[ValidateKwargs],
162
+ ) -> Set[str]:
163
+ """Drafts rating dimensions based on a topic.
164
+
165
+ Args:
166
+ topic (str): The topic for the rating dimensions.
167
+ criteria_count (NonNegativeInt, optional): The number of dimensions to draft, 0 means no limit. Defaults to 0.
168
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
169
+
170
+ Returns:
171
+ Set[str]: A set of rating dimensions.
172
+ """
173
+
174
+ def _validator(response: str) -> Set[str] | None:
175
+ if (
176
+ (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
177
+ and isinstance(json_data, list)
178
+ and all(isinstance(v, str) for v in json_data)
179
+ and (criteria_count == 0 or len(json_data) == criteria_count)
180
+ ):
181
+ return set(json_data)
182
+ return None
183
+
184
+ return await self.aask_validate(
185
+ question=(
186
+ template_manager.render_template(
187
+ configs.templates.draft_rating_criteria_template,
188
+ {
189
+ "topic": topic,
190
+ "criteria_count": criteria_count,
191
+ },
192
+ )
193
+ ),
194
+ validator=_validator,
195
+ system_message=f"# your personal briefing: \n{self.briefing}",
196
+ **kwargs,
197
+ )
198
+
199
+ async def draft_rating_criteria_from_examples(
200
+ self,
201
+ topic: str,
202
+ examples: List[str],
203
+ reasons_count: PositiveInt = 2,
204
+ criteria_count: PositiveInt = 5,
205
+ **kwargs: Unpack[ValidateKwargs],
206
+ ) -> Set[str]:
207
+ """Asynchronously drafts a set of rating criteria based on provided examples.
208
+
209
+ This function generates rating criteria by analyzing examples and extracting reasons for comparison,
210
+ then further condensing these reasons into a specified number of criteria.
211
+
212
+ Parameters:
213
+ topic (str): The subject topic for the rating criteria.
214
+ examples (List[str]): A list of example texts to analyze.
215
+ reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
216
+ criteria_count (PositiveInt, optional): The final number of rating criteria to draft. Defaults to 5.
217
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for validation.
218
+
219
+ Returns:
220
+ Set[str]: A set of drafted rating criteria.
221
+ """
222
+
223
+ def _reasons_validator(response: str) -> List[str] | None:
224
+ if (
225
+ (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
226
+ and isinstance(json_data, list)
227
+ and all(isinstance(v, str) for v in json_data)
228
+ and len(json_data) == reasons_count
229
+ ):
230
+ return json_data
231
+ return None
232
+
233
+ def _criteria_validator(response: str) -> Set[str] | None:
234
+ if (
235
+ (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
236
+ and isinstance(json_data, list)
237
+ and all(isinstance(v, str) for v in json_data)
238
+ and len(json_data) == criteria_count
239
+ ):
240
+ return set(json_data)
241
+ return None
242
+
243
+ kwargs = GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs)
244
+ # extract reasons from the comparison of ordered pairs of extracted from examples
245
+ reasons = flatten(
246
+ await self.aask_validate_batch(
247
+ questions=[
248
+ template_manager.render_template(
249
+ configs.templates.extract_reasons_from_examples_template,
250
+ {
251
+ "topic": topic,
252
+ "first": pair[0],
253
+ "second": pair[1],
254
+ "reasons_count": reasons_count,
255
+ },
256
+ )
257
+ for pair in (permutations(examples, 2))
258
+ ],
259
+ validator=_reasons_validator,
260
+ **kwargs,
261
+ )
262
+ )
263
+ # extract certain mount of criteria from reasons according to their importance and frequency
264
+ return await self.aask_validate(
265
+ question=(
266
+ template_manager.render_template(
267
+ configs.templates.extract_criteria_from_reasons_template,
268
+ {
269
+ "topic": topic,
270
+ "reasons": list(reasons),
271
+ "criteria_count": criteria_count,
272
+ },
273
+ )
274
+ ),
275
+ validator=_criteria_validator,
276
+ **kwargs,
277
+ )
278
+
279
+ async def drafting_rating_weights_klee(
280
+ self,
281
+ topic: str,
282
+ criteria: Set[str],
283
+ **kwargs: Unpack[ValidateKwargs],
284
+ ) -> Dict[str, float]:
285
+ """Drafts rating weights for a given topic and criteria using the Klee method.
286
+
287
+ Args:
288
+ topic (str): The topic for the rating weights.
289
+ criteria (Set[str]): A set of criteria for the rating weights.
290
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
291
+
292
+ Returns:
293
+ Dict[str, float]: A dictionary representing the drafted rating weights for each criterion.
294
+ """
295
+ if len(criteria) < 2: # noqa: PLR2004
296
+ raise ValueError("At least two criteria are required to draft rating weights")
297
+
298
+ def _validator(resp: str) -> float | None:
299
+ if (cap := JsonCapture.convert_with(resp, orjson.loads)) is not None and isinstance(cap, float):
300
+ return cap
301
+ return None
302
+
303
+ criteria = list(criteria) # freeze the order
304
+ windows = windowed(criteria, 2)
305
+
306
+ # get the importance multiplier indicating how important is second criterion compared to the first one
307
+ relative_weights = await self.aask_validate_batch(
308
+ questions=[
309
+ template_manager.render_template(
310
+ configs.templates.draft_rating_weights_klee_template,
311
+ {
312
+ "topic": topic,
313
+ "first": pair[0],
314
+ "second": pair[1],
315
+ },
316
+ )
317
+ for pair in windows
318
+ ],
319
+ validator=_validator,
320
+ **GenerateKwargs(system_message=f"# your personal briefing: \n{self.briefing}", **kwargs),
321
+ )
322
+ weights = [1]
323
+ for rw in relative_weights:
324
+ weights.append(weights[-1] * rw)
325
+ total = sum(weights)
326
+ return dict(zip(criteria, [w / total for w in weights], strict=True))
327
+
328
+ async def composite_score(
329
+ self,
330
+ topic: str,
331
+ to_rate: List[str],
332
+ reasons_count: PositiveInt = 2,
333
+ criteria_count: PositiveInt = 5,
334
+ **kwargs: Unpack[ValidateKwargs],
335
+ ) -> List[float]:
336
+ """Calculates the composite scores for a list of items based on a given topic and criteria.
337
+
338
+ Args:
339
+ topic (str): The topic for the rating.
340
+ to_rate (List[str]): A list of strings to be rated.
341
+ reasons_count (PositiveInt, optional): The number of reasons to extract from each pair of examples. Defaults to 2.
342
+ criteria_count (PositiveInt, optional): The number of criteria to draft. Defaults to 5.
343
+ **kwargs (Unpack[ValidateKwargs]): Additional keyword arguments for the LLM usage.
344
+
345
+ Returns:
346
+ List[float]: A list of composite scores for the items.
347
+ """
348
+ criteria = await self.draft_rating_criteria_from_examples(
349
+ topic, to_rate, reasons_count, criteria_count, **kwargs
350
+ )
351
+ weights = await self.drafting_rating_weights_klee(topic, criteria, **kwargs)
352
+ logger.info(f"Criteria: {criteria}\nWeights: {weights}")
353
+ ratings_seq = await self.rate(to_rate, topic, criteria, **kwargs)
354
+
355
+ return [sum(ratings[c] * weights[c] for c in criteria) for ratings in ratings_seq]
@@ -0,0 +1,131 @@
1
+ """A module for the task capabilities of the Fabricatio library."""
2
+
3
+ from types import CodeType
4
+ from typing import Any, Dict, List, Optional, Tuple, Unpack
5
+
6
+ import orjson
7
+ from fabricatio._rust_instances import template_manager
8
+ from fabricatio.config import configs
9
+ from fabricatio.models.generic import WithBriefing
10
+ from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
11
+ from fabricatio.models.task import Task
12
+ from fabricatio.models.tool import Tool, ToolExecutor
13
+ from fabricatio.models.usages import LLMUsage, ToolBoxUsage
14
+ from fabricatio.parser import JsonCapture, PythonCapture
15
+ from loguru import logger
16
+ from pydantic import ValidationError
17
+
18
+
19
+ class ProposeTask(WithBriefing, LLMUsage):
20
+ """A class that proposes a task based on a prompt."""
21
+
22
+ async def propose[T](
23
+ self,
24
+ prompt: str,
25
+ **kwargs: Unpack[ValidateKwargs],
26
+ ) -> Task[T]:
27
+ """Asynchronously proposes a task based on a given prompt and parameters.
28
+
29
+ Parameters:
30
+ prompt: The prompt text for proposing a task, which is a string that must be provided.
31
+ **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
32
+
33
+ Returns:
34
+ A Task object based on the proposal result.
35
+ """
36
+ if not prompt:
37
+ err = f"{self.name}: Prompt must be provided."
38
+ logger.error(err)
39
+ raise ValueError(err)
40
+
41
+ def _validate_json(response: str) -> None | Task:
42
+ try:
43
+ cap = JsonCapture.capture(response)
44
+ logger.debug(f"Response: \n{response}")
45
+ logger.info(f"Captured JSON: \n{cap}")
46
+ return Task.model_validate_json(cap)
47
+ except ValidationError as e:
48
+ logger.error(f"Failed to parse task from JSON: {e}")
49
+ return None
50
+
51
+ template_data = {"prompt": prompt, "json_example": Task.json_example()}
52
+ return await self.aask_validate(
53
+ question=template_manager.render_template(configs.templates.propose_task_template, template_data),
54
+ validator=_validate_json,
55
+ system_message=f"# your personal briefing: \n{self.briefing}",
56
+ **kwargs,
57
+ )
58
+
59
+
60
+ class HandleTask(WithBriefing, ToolBoxUsage):
61
+ """A class that handles a task based on a task object."""
62
+
63
+ async def draft_tool_usage_code(
64
+ self,
65
+ task: Task,
66
+ tools: List[Tool],
67
+ data: Dict[str, Any],
68
+ **kwargs: Unpack[ValidateKwargs],
69
+ ) -> Tuple[CodeType, List[str]]:
70
+ """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
71
+ logger.info(f"Drafting tool usage code for task: {task.briefing}")
72
+
73
+ if not tools:
74
+ err = f"{self.name}: Tools must be provided to draft the tool usage code."
75
+ logger.error(err)
76
+ raise ValueError(err)
77
+
78
+ def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
79
+ if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
80
+ to_extract := JsonCapture.convert_with(response, orjson.loads)
81
+ ):
82
+ return source, to_extract
83
+
84
+ return None
85
+
86
+ q = template_manager.render_template(
87
+ configs.templates.draft_tool_usage_code_template,
88
+ {
89
+ "data_module_name": configs.toolbox.data_module_name,
90
+ "tool_module_name": configs.toolbox.tool_module_name,
91
+ "task": task.briefing,
92
+ "deps": task.dependencies_prompt,
93
+ "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
94
+ "data": data,
95
+ },
96
+ )
97
+ logger.debug(f"Code Drafting Question: \n{q}")
98
+ return await self.aask_validate(
99
+ question=q,
100
+ validator=_validator,
101
+ system_message=f"# your personal briefing: \n{self.briefing}",
102
+ **kwargs,
103
+ )
104
+
105
+ async def handle_fin_grind(
106
+ self,
107
+ task: Task,
108
+ data: Dict[str, Any],
109
+ box_choose_kwargs: Optional[ChooseKwargs] = None,
110
+ tool_choose_kwargs: Optional[ChooseKwargs] = None,
111
+ **kwargs: Unpack[ValidateKwargs],
112
+ ) -> Optional[Tuple]:
113
+ """Asynchronously handles a task based on a given task object and parameters."""
114
+ logger.info(f"Handling task: \n{task.briefing}")
115
+
116
+ tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
117
+ logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
118
+
119
+ if tools:
120
+ executor = ToolExecutor(candidates=tools, data=data)
121
+ code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
122
+
123
+ cxt = executor.execute(code)
124
+ if to_extract:
125
+ return tuple(cxt.get(k) for k in to_extract)
126
+
127
+ return None
128
+
129
+ async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
130
+ """Asynchronously handles a task based on a given task object and parameters."""
131
+ return await self.handle_fin_grind(task, data, **kwargs)
@@ -163,8 +163,17 @@ class TemplateConfig(BaseModel):
163
163
  draft_rating_manual_template: str = Field(default="draft_rating_manual")
164
164
  """The name of the draft rating manual template which will be used to draft rating manual."""
165
165
 
166
- draft_rating_dimensions_template: str = Field(default="draft_rating_dimensions")
167
- """The name of the draft rating dimensions template which will be used to draft rating dimensions."""
166
+ draft_rating_criteria_template: str = Field(default="draft_rating_criteria")
167
+ """The name of the draft rating criteria template which will be used to draft rating criteria."""
168
+
169
+ extract_reasons_from_examples_template: str = Field(default="extract_reasons_from_examples")
170
+ """The name of the extract reasons from examples template which will be used to extract reasons from examples."""
171
+
172
+ extract_criteria_from_reasons_template: str = Field(default="extract_criteria_from_reasons")
173
+ """The name of the extract criteria from reasons template which will be used to extract criteria from reasons."""
174
+
175
+ draft_rating_weights_klee_template: str = Field(default="draft_rating_weights_klee")
176
+ """The name of the draft rating weights klee template which will be used to draft rating weights with Klee method."""
168
177
 
169
178
 
170
179
  class MagikaConfig(BaseModel):
@@ -5,8 +5,9 @@ from abc import abstractmethod
5
5
  from asyncio import Queue
6
6
  from typing import Any, Dict, Self, Tuple, Type, Union, Unpack
7
7
 
8
+ from fabricatio.capabilities.rating import GiveRating
9
+ from fabricatio.capabilities.task import HandleTask, ProposeTask
8
10
  from fabricatio.journal import logger
9
- from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask
10
11
  from fabricatio.models.generic import WithBriefing
11
12
  from fabricatio.models.task import Task
12
13
  from fabricatio.models.usages import ToolBoxUsage
@@ -24,8 +24,13 @@ class ValidateKwargs(LLMKwargs):
24
24
  max_validations: NotRequired[PositiveInt]
25
25
 
26
26
 
27
- class ChooseKwargs(ValidateKwargs):
28
- """A type representing the keyword arguments for the choose method."""
27
+ class GenerateKwargs(ValidateKwargs):
28
+ """A type representing the keyword arguments for the generate method."""
29
29
 
30
30
  system_message: NotRequired[str]
31
+
32
+
33
+ class ChooseKwargs(GenerateKwargs):
34
+ """A type representing the keyword arguments for the choose method."""
35
+
31
36
  k: NotRequired[NonNegativeInt]