fabricatio 0.2.1.dev1__tar.gz → 0.2.1.dev3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/.gitignore +2 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/PKG-INFO +1 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/llm_usages/llm_usage.py +4 -2
- fabricatio-0.2.1.dev3/examples/make_a_rating/rating.py +84 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/minor/hello_fabricatio.py +1 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/propose_task/propose.py +1 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/simple_chat/chat.py +1 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/task_handle/handle_task.py +2 -3
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/pyproject.toml +1 -1
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/config.py +10 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/action.py +2 -2
- fabricatio-0.2.1.dev3/python/fabricatio/models/advanced.py +289 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/kwargs_types.py +7 -2
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/role.py +2 -3
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/task.py +3 -0
- fabricatio-0.2.1.dev3/templates/built-in/draft_rating_dimensions.hbs +24 -0
- fabricatio-0.2.1.dev3/templates/built-in/draft_rating_manual.hbs +26 -0
- fabricatio-0.2.1.dev3/templates/built-in/rate_fine_grind.hbs +39 -0
- fabricatio-0.2.1.dev3/templates.tar.gz +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/uv.lock +1 -1
- fabricatio-0.2.1.dev1/python/fabricatio/models/advanced.py +0 -128
- fabricatio-0.2.1.dev1/templates.tar.gz +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/.github/workflows/build-package.yaml +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/.github/workflows/ruff.yaml +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/.github/workflows/tests.yaml +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/.python-version +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/Cargo.lock +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/Cargo.toml +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/LICENSE +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/Makefile +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/README.md +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/make_diary/commits.json +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/make_diary/diary.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/__init__.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/_rust.pyi +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/_rust_instances.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/actions/__init__.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/actions/communication.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/actions/transmission.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/core.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/decorators.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/fs/__init__.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/fs/curd.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/fs/readers.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/journal.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/events.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/generic.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/tool.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/usages.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/utils.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/parser.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/py.typed +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/toolboxes/__init__.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/toolboxes/arithmetic.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/toolboxes/fs.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/toolboxes/task.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/src/hash.rs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/src/lib.rs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/src/templates.rs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/binary-exploitation-ctf-solver.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/claude-xml.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/clean-up-code.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/cryptography-ctf-solver.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/dependencies.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/document-the-code.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/draft_tool_usage_code.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/find-security-vulnerabilities.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/fix-bugs.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/improve-performance.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/make_choice.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/make_judgment.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/propose_task.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/refactor.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/reverse-engineering-ctf-solver.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/task_briefing.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/web-ctf-solver.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/write-git-commit.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/write-github-pull-request.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/templates/built-in/write-github-readme.hbs +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_config.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_action.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_advanced.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_generic.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_role.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_task.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_tool.py +0 -0
- {fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/tests/test_models/test_usages.py +0 -0
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/llm_usages/llm_usage.py

@@ -38,8 +38,10 @@ async def main() -> None:
         name="Coder",
         description="A python coder who can write code and documentation",
         registry={
-            Event.instantiate_from("coding
-
+            Event.instantiate_from("coding").push_wildcard().push("pending"): WorkFlow(
+                name="write code", steps=(WriteCode,)
+            ),
+            Event.instantiate_from("doc").push_wildcard().push("pending"): WorkFlow(
                 name="write documentation", steps=(WriteDocumentation,)
             ),
         },
fabricatio-0.2.1.dev3/examples/make_a_rating/rating.py

@@ -0,0 +1,84 @@
+"""Example of proposing a task to a role."""
+
+import asyncio
+from typing import Dict, List, Set, Unpack
+
+import orjson
+from fabricatio import Action, JsonCapture, Role, WorkFlow, logger
+from fabricatio.models.events import Event
+from fabricatio.models.task import Task
+
+
+class Rate(Action):
+    """Rate the task."""
+
+    name: str = "rate"
+    output_key: str = "task_output"
+
+    async def _execute(self, to_rate: List[str], rate_topic: str, dimensions: Set[str], **_) -> [Dict[str, float]]:
+        """Rate the task."""
+        return await asyncio.gather(
+            *[
+                self.rate(
+                    target,
+                    rate_topic,
+                    dimensions,
+                )
+                for target in to_rate
+            ]
+        )
+
+
+class WhatToRate(Action):
+    """Figure out what to rate."""
+
+    name: str = "figure out what to rate"
+
+    output_key: str = "to_rate"
+
+    async def _execute(self, task_input: Task, rate_topic: str, **cxt: Unpack) -> List[str]:
+        def _validate(resp: str) -> List[str] | None:
+            if (
+                (cap := JsonCapture.convert_with(resp, orjson.loads)) is not None
+                and isinstance(cap, list)
+                and all(isinstance(i, str) for i in cap)
+            ):
+                return cap
+            return None
+
+        return await self.aask_validate(
+            f"This is task briefing:\n{task_input.briefing}\n\n"
+            f"We are talking about {rate_topic}. you need to extract targets to rate into a the JSON array\n"
+            f"The response SHALL be a JSON array of strings within the codeblock\n"
+            f"# Example\n"
+            f'```json\n["this is a target to rate", "this is another target to rate"]\n```',
+            _validate,
+        )
+
+
+async def main() -> None:
+    """Main function."""
+    role = Role(
+        name="TaskRater",
+        description="A role that can rate tasks.",
+        registry={
+            Event.instantiate_from("rate_food").push_wildcard().push("pending"): WorkFlow(
+                name="Rate food",
+                steps=(WhatToRate, Rate),
+                extra_init_context={
+                    "rate_topic": "If this food is cheap and delicious",
+                    "dimensions": {"taste", "price", "quality", "safety", "healthiness", "freshness"},
+                },
+            ),
+        },
+    )
+    task = await role.propose(
+        "rate for rotten apple, ripen banana, fresh orange, giga-burger, smelly pizza with flies on it, and a boiling instant coffee",
+    )
+    rating = await task.move_to("rate_food").delegate()
+
+    logger.success(f"Result: \n{rating}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/minor/hello_fabricatio.py

@@ -5,7 +5,7 @@ from typing import Any
 
 from fabricatio import Action, Role, Task, WorkFlow, logger
 
-task = Task(name="say hello", goal="say hello", description="say hello to the world")
+task = Task(name="say hello", goal=["say hello"], description="say hello to the world")
 
 
 class Hello(Action):
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/propose_task/propose.py

@@ -5,7 +5,7 @@ from typing import Any
 
 from fabricatio import Action, Role, Task, WorkFlow, logger
 
-task = Task(name="say hello", goal="say hello", description="say hello to the world")
+task = Task(name="say hello", goal=["say hello"], description="say hello to the world")
 
 
 class Talk(Action):
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/simple_chat/chat.py

@@ -5,7 +5,7 @@ from typing import Any
 
 from fabricatio import Action, Role, Task, WorkFlow, logger
 
-task = Task(name="say hello", goal="say hello", description="say hello to the world")
+task = Task(name="say hello", goal=["say hello"], description="say hello to the world")
 
 
 class Talk(Action):
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/examples/task_handle/handle_task.py

@@ -3,7 +3,6 @@
 import asyncio
 from typing import Any, Set, Unpack
 
-import questionary
 from fabricatio import Action, Event, PythonCapture, Role, Task, ToolBox, WorkFlow, fs_toolbox, logger
 from pydantic import Field
 
@@ -36,7 +35,7 @@ class DumpText(Action):
             ["dump the text contained in `text_to_dump` to a file", "only return the path of the written file"]
         )
 
-        path = await self.
+        path = await self.handle(
             task_input,
             {"text_to_dump": dump_text},
         )
@@ -72,7 +71,7 @@ async def main() -> None:
         },
     )
 
-    prompt =
+    prompt = "i want you to write a cli app implemented with python , which can calculate the sum to a given n, all write to a single file names `cli.py`, put it in `output` folder."
 
     proposed_task = await role.propose(prompt)
     path = await proposed_task.move_to("coding").delegate()
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/config.py

@@ -157,6 +157,15 @@ class TemplateConfig(BaseModel):
     task_briefing_template: str = Field(default="task_briefing")
     """The name of the task briefing template which will be used to brief a task."""
 
+    rate_fine_grind_template: str = Field(default="rate_fine_grind")
+    """The name of the rate fine grind template which will be used to rate fine grind."""
+
+    draft_rating_manual_template: str = Field(default="draft_rating_manual")
+    """The name of the draft rating manual template which will be used to draft rating manual."""
+
+    draft_rating_dimensions_template: str = Field(default="draft_rating_dimensions")
+    """The name of the draft rating dimensions template which will be used to draft rating dimensions."""
+
 
 class MagikaConfig(BaseModel):
     """Magika configuration class."""
@@ -208,6 +217,7 @@ class Settings(BaseSettings):
         toml_file=["fabricatio.toml", rf"{ROAMING_DIR}\fabricatio.toml"],
         env_file=[".env", ".envrc"],
         use_attribute_docstrings=True,
+        extra="ignore",
     )
 
     llm: LLMConfig = Field(default_factory=LLMConfig)
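For orientation, the three template names added to TemplateConfig above are consumed through the same `template_manager.render_template` call that the new advanced.py uses. The sketch below is editorial, not part of the package diff: the call and config attribute come verbatim from the diff, while the data dict is made-up example input mirroring what `GiveRating.rate_fine_grind` passes.

```python
# Hedged sketch (not part of the package diff): render one of the newly
# registered built-in templates by its configured name.
from fabricatio._rust_instances import template_manager
from fabricatio.config import configs

prompt = template_manager.render_template(
    configs.templates.rate_fine_grind_template,  # defaults to "rate_fine_grind"
    {
        # Illustrative data only; keys match what rate_fine_grind supplies.
        "to_rate": "fresh orange",
        "min_score": 0.0,
        "max_score": 5.0,
        "rating_manual": {"taste": "How the food tastes."},
    },
)
print(prompt)
```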
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/action.py

@@ -6,14 +6,14 @@ from asyncio import Queue
 from typing import Any, Dict, Self, Tuple, Type, Union, Unpack
 
 from fabricatio.journal import logger
-from fabricatio.models.advanced import HandleTask, ProposeTask
+from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask
 from fabricatio.models.generic import WithBriefing
 from fabricatio.models.task import Task
 from fabricatio.models.usages import ToolBoxUsage
 from pydantic import Field, PrivateAttr
 
 
-class Action(HandleTask, ProposeTask):
+class Action(HandleTask, ProposeTask, GiveRating):
     """Class that represents an action to be executed in a workflow."""
 
     personality: str = Field(default="")
fabricatio-0.2.1.dev3/python/fabricatio/models/advanced.py

@@ -0,0 +1,289 @@
+"""A module for advanced models and functionalities."""
+
+from types import CodeType
+from typing import Any, Dict, List, Optional, Set, Tuple, Unpack
+
+import orjson
+from fabricatio._rust_instances import template_manager
+from fabricatio.config import configs
+from fabricatio.models.generic import WithBriefing
+from fabricatio.models.kwargs_types import ChooseKwargs, ValidateKwargs
+from fabricatio.models.task import Task
+from fabricatio.models.tool import Tool, ToolExecutor
+from fabricatio.models.usages import LLMUsage, ToolBoxUsage
+from fabricatio.parser import JsonCapture, PythonCapture
+from loguru import logger
+from pydantic import NonNegativeInt, ValidationError
+
+
+class ProposeTask(WithBriefing, LLMUsage):
+    """A class that proposes a task based on a prompt."""
+
+    async def propose[T](
+        self,
+        prompt: str,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Task[T]:
+        """Asynchronously proposes a task based on a given prompt and parameters.
+
+        Parameters:
+            prompt: The prompt text for proposing a task, which is a string that must be provided.
+            **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
+
+        Returns:
+            A Task object based on the proposal result.
+        """
+        if not prompt:
+            err = f"{self.name}: Prompt must be provided."
+            logger.error(err)
+            raise ValueError(err)
+
+        def _validate_json(response: str) -> None | Task:
+            try:
+                cap = JsonCapture.capture(response)
+                logger.debug(f"Response: \n{response}")
+                logger.info(f"Captured JSON: \n{cap}")
+                return Task.model_validate_json(cap)
+            except ValidationError as e:
+                logger.error(f"Failed to parse task from JSON: {e}")
+                return None
+
+        template_data = {"prompt": prompt, "json_example": Task.json_example()}
+        return await self.aask_validate(
+            question=template_manager.render_template(configs.templates.propose_task_template, template_data),
+            validator=_validate_json,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+
+class HandleTask(WithBriefing, ToolBoxUsage):
+    """A class that handles a task based on a task object."""
+
+    async def draft_tool_usage_code(
+        self,
+        task: Task,
+        tools: List[Tool],
+        data: Dict[str, Any],
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Tuple[CodeType, List[str]]:
+        """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
+        logger.info(f"Drafting tool usage code for task: {task.briefing}")
+
+        if not tools:
+            err = f"{self.name}: Tools must be provided to draft the tool usage code."
+            logger.error(err)
+            raise ValueError(err)
+
+        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
+            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
+                to_extract := JsonCapture.convert_with(response, orjson.loads)
+            ):
+                return source, to_extract
+
+            return None
+
+        q = template_manager.render_template(
+            configs.templates.draft_tool_usage_code_template,
+            {
+                "data_module_name": configs.toolbox.data_module_name,
+                "tool_module_name": configs.toolbox.tool_module_name,
+                "task": task.briefing,
+                "deps": task.dependencies_prompt,
+                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
+                "data": data,
+            },
+        )
+        logger.debug(f"Code Drafting Question: \n{q}")
+        return await self.aask_validate(
+            question=q,
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def handle_fin_grind(
+        self,
+        task: Task,
+        data: Dict[str, Any],
+        box_choose_kwargs: Optional[ChooseKwargs] = None,
+        tool_choose_kwargs: Optional[ChooseKwargs] = None,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Optional[Tuple]:
+        """Asynchronously handles a task based on a given task object and parameters."""
+        logger.info(f"Handling task: \n{task.briefing}")
+
+        tools = await self.gather_tools_fine_grind(task, box_choose_kwargs, tool_choose_kwargs)
+        logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
+
+        if tools:
+            executor = ToolExecutor(candidates=tools, data=data)
+            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
+
+            cxt = executor.execute(code)
+            if to_extract:
+                return tuple(cxt.get(k) for k in to_extract)
+
+        return None
+
+    async def handle(self, task: Task, data: Dict[str, Any], **kwargs: Unpack[ValidateKwargs]) -> Optional[Tuple]:
+        """Asynchronously handles a task based on a given task object and parameters."""
+        return await self.handle_fin_grind(task, data, **kwargs)
+
+
+class GiveRating(WithBriefing, LLMUsage):
+    """A class that provides functionality to rate tasks based on a rating manual and score range."""
+
+    async def rate_fine_grind(
+        self,
+        to_rate: str,
+        rating_manual: Dict[str, str],
+        score_range: Tuple[float, float],
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Dict[str, float]:
+        """Rates a given task based on a rating manual and score range.
+
+        Args:
+            to_rate: The task to be rated.
+            rating_manual: A dictionary containing the rating criteria.
+            score_range: A tuple representing the valid score range.
+            **kwargs: Additional keyword arguments for the LLM usage.
+
+        Returns:
+            A dictionary with the ratings for each dimension.
+        """
+
+        def _validator(response: str) -> Dict[str, float] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, dict)
+                and json_data.keys() == rating_manual.keys()
+                and all(isinstance(v, float) for v in json_data.values())
+                and all(score_range[0] <= v <= score_range[1] for v in json_data.values())
+            ):
+                return json_data
+            return None
+
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.rate_fine_grind_template,
+                    {
+                        "to_rate": to_rate,
+                        "min_score": score_range[0],
+                        "max_score": score_range[1],
+                        "rating_manual": rating_manual,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def rate(
+        self,
+        to_rate: str,
+        topic: str,
+        dimensions: Set[str],
+        score_range: Tuple[float, float] = (0.0, 1.0),
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Dict[str, float]:
+        """Rates a task based on a topic and dimensions. this function will automatically draft a rating manual based on the topic and dimensions.
+
+        Args:
+            to_rate: The task to be rated.
+            topic: The topic related to the task.
+            dimensions: A set of dimensions for rating.
+            score_range: A tuple representing the valid score range
+            **kwargs: Additional keyword arguments for the LLM usage.
+
+        Returns:
+            A dictionary with the ratings for each dimension.
+        """
+        manual = await self.draft_rating_manual(topic, dimensions, **kwargs)
+        return await self.rate_fine_grind(to_rate, manual, score_range, **kwargs)
+
+    async def draft_rating_manual(
+        self, topic: str, dimensions: Set[str], **kwargs: Unpack[ValidateKwargs]
+    ) -> Dict[str, str]:
+        """Drafts a rating manual based on a topic and dimensions.
+
+        Args:
+            topic: The topic for the rating manual.
+            dimensions: A set of dimensions for the rating manual.
+            **kwargs: Additional keyword arguments for the LLM usage.
+
+        Returns:
+            A dictionary representing the drafted rating manual.
+        """
+
+        def _validator(response: str) -> Dict[str, str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, dict)
+                and json_data.keys() == dimensions
+                and all(isinstance(v, str) for v in json_data.values())
+            ):
+                return json_data
+            return None
+
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.draft_rating_manual_template,
+                    {
+                        "topic": topic,
+                        "dimensions": dimensions,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
+
+    async def draft_rating_dimensions(
+        self,
+        topic: str,
+        dimensions_count: NonNegativeInt = 0,
+        examples: Optional[List[str]] = None,
+        **kwargs: Unpack[ValidateKwargs],
+    ) -> Set[str]:
+        """Drafts rating dimensions based on a topic.
+
+        Args:
+            topic: The topic for the rating dimensions.
+            dimensions_count: The number of dimensions to draft, 0 means no limit.
+            examples: A list of examples which is rated based on the rating dimensions.
+            **kwargs: Additional keyword arguments for the LLM usage.
+
+        Returns:
+            A set of rating dimensions.
+        """
+
+        def _validator(response: str) -> Set[str] | None:
+            if (
+                (json_data := JsonCapture.convert_with(response, orjson.loads)) is not None
+                and isinstance(json_data, list)
+                and all(isinstance(v, str) for v in json_data)
+                and (dimensions_count == 0 or len(json_data) == dimensions_count)
+            ):
+                return set(json_data)
+            return None
+
+        return await self.aask_validate(
+            question=(
+                template_manager.render_template(
+                    configs.templates.draft_rating_dimensions_template,
+                    {
+                        "topic": topic,
+                        "examples": examples,
+                        "dimensions_count": dimensions_count,
+                    },
+                )
+            ),
+            validator=_validator,
+            system_message=f"# your personal briefing: \n{self.briefing}",
+            **kwargs,
+        )
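For orientation, the new GiveRating mix-in can also be driven directly, outside a workflow. The sketch below is editorial, not part of the package diff: the `rate()` signature and its manual-then-score flow come from the hunk above, while the concrete FoodCritic subclass and the printed score values are illustrative assumptions only.

```python
# Hedged sketch (not part of the package diff): calling GiveRating.rate directly.
import asyncio

from fabricatio.models.advanced import GiveRating


class FoodCritic(GiveRating):
    """Hypothetical rater; GiveRating itself only supplies the rating methods."""

    name: str = "FoodCritic"
    description: str = "Rates food descriptions against a drafted rating manual."


async def main() -> None:
    critic = FoodCritic()
    # rate() first drafts a per-dimension rating manual for the topic, then
    # scores the text with rate_fine_grind(); every score stays in score_range.
    scores = await critic.rate(
        to_rate="fresh orange",
        topic="If this food is cheap and delicious",
        dimensions={"taste", "price", "freshness"},
        score_range=(0.0, 1.0),
    )
    print(scores)  # e.g. {"taste": 0.82, "price": 0.74, "freshness": 0.91} (illustrative)


if __name__ == "__main__":
    asyncio.run(main())
```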
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/kwargs_types.py

@@ -18,9 +18,14 @@ class LLMKwargs(TypedDict):
     max_retries: NotRequired[PositiveInt]
 
 
-class
-    """A type representing the keyword arguments for the
+class ValidateKwargs(LLMKwargs):
+    """A type representing the keyword arguments for the validate method."""
 
     max_validations: NotRequired[PositiveInt]
+
+
+class ChooseKwargs(ValidateKwargs):
+    """A type representing the keyword arguments for the choose method."""
+
     system_message: NotRequired[str]
     k: NotRequired[NonNegativeInt]
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/role.py

@@ -5,14 +5,13 @@ from typing import Any, Self, Set
 from fabricatio.core import env
 from fabricatio.journal import logger
 from fabricatio.models.action import WorkFlow
-from fabricatio.models.advanced import ProposeTask
+from fabricatio.models.advanced import GiveRating, HandleTask, ProposeTask
 from fabricatio.models.events import Event
 from fabricatio.models.tool import ToolBox
-from fabricatio.models.usages import ToolBoxUsage
 from pydantic import Field
 
 
-class Role(ProposeTask,
+class Role(ProposeTask, HandleTask, GiveRating):
     """Class that represents a role with a registry of events and workflows."""
 
     registry: dict[Event | str, WorkFlow] = Field(...)
{fabricatio-0.2.1.dev1 → fabricatio-0.2.1.dev3}/python/fabricatio/models/task.py

@@ -57,6 +57,9 @@ class Task[T](WithBriefing, WithJsonExample, WithDependency):
     namespace: List[str] = Field(default_factory=list)
     """The namespace of the task, a list of namespace segment, as string."""
 
+    dependencies: List[str] = Field(default_factory=list)
+    """A list of file paths, These file are needed to read or write to meet a specific requirement of this task."""
+
     _output: Queue = PrivateAttr(default_factory=lambda: Queue(maxsize=1))
     """The output queue of the task."""
 
fabricatio-0.2.1.dev3/templates/built-in/draft_rating_dimensions.hbs

@@ -0,0 +1,24 @@
+# Task
+Please draft rating dimensions for the topic: "{{ topic }}".
+Return the dimensions{{#if dimensions_count}} with length of {{ dimensions_count }}{{/if}} in JSON format as a list of strings within code block.
+
+
+# Example:
+returning dimensions{{#if dimensions_count}} with length of 3{{/if}} for the topic "Nice icecream"
+
+----- Start of response example -----
+```json
+[
+  "outlook",
+  "taste",
+  "texture"
+]
+```
+----- End of response example -----
+
+
+# Warning
+- Please ensure that the dimensions are clear and concise.
+- The response SHALL be returned as a JSON object within the codeblock.
+- No additional Explanation is needed.
+{{#if dimensions_count }}- You must return exactly {{ dimensions_count }} dimensions.{{/if}}
fabricatio-0.2.1.dev3/templates/built-in/draft_rating_manual.hbs

@@ -0,0 +1,26 @@
+# Task
+Please draft a rating manual for the following topic and dimensions, which will be used to rate the entity based.
+
+Topic: {{ topic }}
+Dimensions: {{ dimensions }}
+
+Return the rating manual as a JSON object within the codeblock where the keys are the dimensions and the values are the criteria for each dimension.
+
+
+# Example:
+
+topic: Nice icecream
+----- Start of response example -----
+```json
+{
+  "outlook": "Appearance of the icecream, how it looks. High score for an ice cream with a beautiful appearance such as vibrant color combinations and topped with fresh fruits or chocolate pieces; low score for an ice cream that looks unappealing, possibly showing signs of melting or covered in ice crystals.",
+  "taste": "How the icecream tastes. High score for delicious ice cream with rich and authentic flavors, like vanilla with natural vanilla bean aroma and moderate sweetness; low score for bland or overly sweet ice cream with noticeable artificial flavoring or not matching its claimed flavor.",
+  "texture": "How the icecream feels. High score for smooth texture without icy bits, melts smoothly in the mouth providing a creamy experience; low score for rough texture containing many ice crystals, feeling like it has been frozen for too long lacking softness."
+}
+```
+----- end of response example -----
+
+# Warning
+- Please ensure that the rating manual is clear and concise, and that the criteria for each dimension are well-defined.
+- The response SHALL be returned as a JSON object within the codeblock.
+- No additional Explanation is needed.
fabricatio-0.2.1.dev3/templates/built-in/rate_fine_grind.hbs

@@ -0,0 +1,39 @@
+# Task:
+Please rate the following task based on the provided rating manual and score range.
+
+To rate text below:
+----- Start of text to tate -----
+{{ to_rate }}
+----- End of text to tate -----
+
+Rating Manual:
+{{ rating_manual }}
+
+Score Range:
+- Minimal score: {{ min_score }}
+- Maximum score is {{ max_score }}
+
+
+By referring the rating manual, rate the text based on the provided rating manual and score range.
+
+# Example:
+rate the cherry icecream with ["outlook", "taste", "texture"] dimensions, the score range is 0-5.
+
+----- Start of response example -----
+```json
+{
+  "outlook": 3.56,
+  "taste": 4.55,
+  "texture": 3.01
+}
+```
+----- end of response example -----
+
+
+# Warning
+- Please ensure that the rating is done based on the rating manual and score range provided.
+- The response SHALL be returned as a JSON object within the codeblock.
+- No additional Explanation is needed.
+- The rating for each dimension SHALL be rounded to two decimal places.
+- The score dor each dimension SHALL be within the score range provided, that is, not smaller than {{ min_score }} and not larger than {{ max_score }}.
+- the response SHALL include all the dimensions provided in the [{{#each rating_manual}}"{{@key}}",{{/each}}].
fabricatio-0.2.1.dev3/templates.tar.gz

Binary file
fabricatio-0.2.1.dev1/python/fabricatio/models/advanced.py

@@ -1,128 +0,0 @@
-"""A module for advanced models and functionalities."""
-
-from types import CodeType
-from typing import Any, Dict, List, Optional, Tuple, Unpack
-
-import orjson
-from fabricatio._rust_instances import template_manager
-from fabricatio.config import configs
-from fabricatio.models.generic import WithBriefing
-from fabricatio.models.kwargs_types import LLMKwargs
-from fabricatio.models.task import Task
-from fabricatio.models.tool import Tool, ToolExecutor
-from fabricatio.models.usages import LLMUsage, ToolBoxUsage
-from fabricatio.parser import JsonCapture, PythonCapture
-from loguru import logger
-from pydantic import PositiveInt, ValidationError
-
-
-class ProposeTask(WithBriefing, LLMUsage):
-    """A class that proposes a task based on a prompt."""
-
-    async def propose[T](
-        self,
-        prompt: str,
-        max_validations: PositiveInt = 2,
-        **kwargs: Unpack[LLMKwargs],
-    ) -> Task[T]:
-        """Asynchronously proposes a task based on a given prompt and parameters.
-
-        Parameters:
-            prompt: The prompt text for proposing a task, which is a string that must be provided.
-            max_validations: The maximum number of validations allowed, default is 2.
-            **kwargs: The keyword arguments for the LLM (Large Language Model) usage.
-
-        Returns:
-            A Task object based on the proposal result.
-        """
-        if not prompt:
-            err = f"{self.name}: Prompt must be provided."
-            logger.error(err)
-            raise ValueError(err)
-
-        def _validate_json(response: str) -> None | Task:
-            try:
-                cap = JsonCapture.capture(response)
-                logger.debug(f"Response: \n{response}")
-                logger.info(f"Captured JSON: \n{cap}")
-                return Task.model_validate_json(cap)
-            except ValidationError as e:
-                logger.error(f"Failed to parse task from JSON: {e}")
-                return None
-
-        template_data = {"prompt": prompt, "json_example": Task.json_example()}
-        return await self.aask_validate(
-            question=template_manager.render_template(configs.templates.propose_task_template, template_data),
-            validator=_validate_json,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            max_validations=max_validations,
-            **kwargs,
-        )
-
-
-class HandleTask(WithBriefing, ToolBoxUsage):
-    """A class that handles a task based on a task object."""
-
-    async def draft_tool_usage_code(
-        self,
-        task: Task,
-        tools: List[Tool],
-        data: Dict[str, Any],
-        **kwargs: Unpack[LLMKwargs],
-    ) -> Tuple[CodeType, List[str]]:
-        """Asynchronously drafts the tool usage code for a task based on a given task object and tools."""
-        logger.info(f"Drafting tool usage code for task: {task.briefing}")
-
-        if not tools:
-            err = f"{self.name}: Tools must be provided to draft the tool usage code."
-            logger.error(err)
-            raise ValueError(err)
-
-        def _validator(response: str) -> Tuple[CodeType, List[str]] | None:
-            if (source := PythonCapture.convert_with(response, lambda resp: compile(resp, "<string>", "exec"))) and (
-                to_extract := JsonCapture.convert_with(response, orjson.loads)
-            ):
-                return source, to_extract
-
-            return None
-
-        q = template_manager.render_template(
-            configs.templates.draft_tool_usage_code_template,
-            {
-                "data_module_name": configs.toolbox.data_module_name,
-                "tool_module_name": configs.toolbox.tool_module_name,
-                "task": task.briefing,
-                "deps": task.dependencies_prompt,
-                "tools": [{"name": t.name, "briefing": t.briefing} for t in tools],
-                "data": data,
-            },
-        )
-        logger.debug(f"Code Drafting Question: \n{q}")
-        return await self.aask_validate(
-            question=q,
-            validator=_validator,
-            system_message=f"# your personal briefing: \n{self.briefing}",
-            **kwargs,
-        )
-
-    async def handle_fin_grind(
-        self,
-        task: Task,
-        data: Dict[str, Any],
-        **kwargs: Unpack[LLMKwargs],
-    ) -> Optional[Tuple]:
-        """Asynchronously handles a task based on a given task object and parameters."""
-        logger.info(f"Handling task: \n{task.briefing}")
-
-        tools = await self.gather_tools(task)
-        logger.info(f"{self.name} have gathered {[t.name for t in tools]}")
-
-        if tools:
-            executor = ToolExecutor(candidates=tools, data=data)
-            code, to_extract = await self.draft_tool_usage_code(task, tools, data, **kwargs)
-
-            cxt = executor.execute(code)
-            if to_extract:
-                return tuple(cxt.get(k) for k in to_extract)
-
-        return None
fabricatio-0.2.1.dev1/templates.tar.gz

Binary file
All remaining files listed above were renamed from fabricatio-0.2.1.dev1 to fabricatio-0.2.1.dev3 without content changes.