zrb 1.8.10__py3-none-any.whl → 1.21.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of zrb might be problematic.
- zrb/__init__.py +126 -113
- zrb/__main__.py +1 -1
- zrb/attr/type.py +10 -7
- zrb/builtin/__init__.py +2 -50
- zrb/builtin/git.py +12 -1
- zrb/builtin/group.py +31 -15
- zrb/builtin/http.py +7 -8
- zrb/builtin/llm/attachment.py +40 -0
- zrb/builtin/llm/chat_completion.py +274 -0
- zrb/builtin/llm/chat_session.py +152 -85
- zrb/builtin/llm/chat_session_cmd.py +288 -0
- zrb/builtin/llm/chat_trigger.py +79 -0
- zrb/builtin/llm/history.py +7 -9
- zrb/builtin/llm/llm_ask.py +221 -98
- zrb/builtin/llm/tool/api.py +74 -52
- zrb/builtin/llm/tool/cli.py +46 -17
- zrb/builtin/llm/tool/code.py +71 -90
- zrb/builtin/llm/tool/file.py +301 -241
- zrb/builtin/llm/tool/note.py +84 -0
- zrb/builtin/llm/tool/rag.py +38 -8
- zrb/builtin/llm/tool/sub_agent.py +67 -50
- zrb/builtin/llm/tool/web.py +146 -122
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/setup/latex/ubuntu.py +1 -0
- zrb/builtin/setup/ubuntu.py +1 -1
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/builtin/todo.py +13 -2
- zrb/config/config.py +614 -0
- zrb/config/default_prompt/file_extractor_system_prompt.md +112 -0
- zrb/config/default_prompt/interactive_system_prompt.md +29 -0
- zrb/config/default_prompt/persona.md +1 -0
- zrb/config/default_prompt/repo_extractor_system_prompt.md +112 -0
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +29 -0
- zrb/config/default_prompt/summarization_prompt.md +57 -0
- zrb/config/default_prompt/system_prompt.md +38 -0
- zrb/config/llm_config.py +339 -0
- zrb/config/llm_context/config.py +166 -0
- zrb/config/llm_context/config_parser.py +40 -0
- zrb/config/llm_context/workflow.py +81 -0
- zrb/config/llm_rate_limitter.py +190 -0
- zrb/{runner → config}/web_auth_config.py +17 -22
- zrb/context/any_shared_context.py +17 -1
- zrb/context/context.py +16 -2
- zrb/context/shared_context.py +18 -8
- zrb/group/any_group.py +12 -5
- zrb/group/group.py +67 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/option_input.py +13 -1
- zrb/input/text_input.py +8 -25
- zrb/runner/cli.py +25 -23
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_app.py +3 -3
- zrb/runner/web_route/docs_route.py +1 -1
- zrb/runner/web_route/error_page/serve_default_404.py +1 -1
- zrb/runner/web_route/error_page/show_error_page.py +1 -1
- zrb/runner/web_route/home_page/home_page_route.py +2 -2
- zrb/runner/web_route/login_api_route.py +1 -1
- zrb/runner/web_route/login_page/login_page_route.py +2 -2
- zrb/runner/web_route/logout_api_route.py +1 -1
- zrb/runner/web_route/logout_page/logout_page_route.py +2 -2
- zrb/runner/web_route/node_page/group/show_group_page.py +1 -1
- zrb/runner/web_route/node_page/node_page_route.py +1 -1
- zrb/runner/web_route/node_page/task/show_task_page.py +1 -1
- zrb/runner/web_route/refresh_token_api_route.py +1 -1
- zrb/runner/web_route/static/static_route.py +1 -1
- zrb/runner/web_route/task_input_api_route.py +6 -6
- zrb/runner/web_route/task_session_api_route.py +20 -12
- zrb/runner/web_util/cookie.py +1 -1
- zrb/runner/web_util/token.py +1 -1
- zrb/runner/web_util/user.py +8 -4
- zrb/session/any_session.py +24 -17
- zrb/session/session.py +50 -25
- zrb/session_state_logger/any_session_state_logger.py +9 -4
- zrb/session_state_logger/file_session_state_logger.py +16 -6
- zrb/session_state_logger/session_state_logger_factory.py +1 -1
- zrb/task/any_task.py +30 -9
- zrb/task/base/context.py +17 -9
- zrb/task/base/execution.py +15 -8
- zrb/task/base/lifecycle.py +8 -4
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +69 -5
- zrb/task/base_trigger.py +12 -5
- zrb/task/cmd_task.py +1 -1
- zrb/task/llm/agent.py +154 -161
- zrb/task/llm/agent_runner.py +152 -0
- zrb/task/llm/config.py +47 -18
- zrb/task/llm/conversation_history.py +209 -0
- zrb/task/llm/conversation_history_model.py +67 -0
- zrb/task/llm/default_workflow/coding/workflow.md +41 -0
- zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
- zrb/task/llm/default_workflow/git/workflow.md +118 -0
- zrb/task/llm/default_workflow/golang/workflow.md +128 -0
- zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
- zrb/task/llm/default_workflow/java/workflow.md +146 -0
- zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
- zrb/task/llm/default_workflow/python/workflow.md +160 -0
- zrb/task/llm/default_workflow/researching/workflow.md +153 -0
- zrb/task/llm/default_workflow/rust/workflow.md +162 -0
- zrb/task/llm/default_workflow/shell/workflow.md +299 -0
- zrb/task/llm/error.py +24 -10
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_processor.py +206 -0
- zrb/task/llm/history_summarization.py +11 -166
- zrb/task/llm/print_node.py +193 -69
- zrb/task/llm/prompt.py +242 -45
- zrb/task/llm/subagent_conversation_history.py +41 -0
- zrb/task/llm/tool_wrapper.py +260 -57
- zrb/task/llm/workflow.py +76 -0
- zrb/task/llm_task.py +182 -171
- zrb/task/make_task.py +2 -3
- zrb/task/rsync_task.py +26 -11
- zrb/task/scheduler.py +4 -4
- zrb/util/attr.py +54 -39
- zrb/util/callable.py +23 -0
- zrb/util/cli/markdown.py +12 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/file.py +29 -11
- zrb/util/git.py +8 -11
- zrb/util/git_diff_model.py +10 -0
- zrb/util/git_subtree.py +9 -14
- zrb/util/git_subtree_model.py +32 -0
- zrb/util/init_path.py +1 -1
- zrb/util/markdown.py +62 -0
- zrb/util/string/conversion.py +2 -2
- zrb/util/todo.py +17 -50
- zrb/util/todo_model.py +46 -0
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- zrb/xcom/xcom.py +10 -0
- zrb-1.21.29.dist-info/METADATA +270 -0
- {zrb-1.8.10.dist-info → zrb-1.21.29.dist-info}/RECORD +140 -98
- {zrb-1.8.10.dist-info → zrb-1.21.29.dist-info}/WHEEL +1 -1
- zrb/config.py +0 -335
- zrb/llm_config.py +0 -411
- zrb/llm_rate_limitter.py +0 -125
- zrb/task/llm/context.py +0 -102
- zrb/task/llm/context_enrichment.py +0 -199
- zrb/task/llm/history.py +0 -211
- zrb-1.8.10.dist-info/METADATA +0 -264
- {zrb-1.8.10.dist-info → zrb-1.21.29.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,152 @@ (new file: zrb/task/llm/agent_runner.py)

```python
import json
from collections.abc import Callable
from typing import TYPE_CHECKING, Any

from zrb.config.llm_rate_limitter import LLMRateLimitter, llm_rate_limitter
from zrb.context.any_context import AnyContext
from zrb.task.llm.error import extract_api_error_details
from zrb.task.llm.print_node import print_node
from zrb.task.llm.typing import ListOfDict
from zrb.util.cli.style import stylize_faint

if TYPE_CHECKING:
    from pydantic_ai import Agent, Tool
    from pydantic_ai.agent import AgentRun
    from pydantic_ai.messages import UserContent

    ToolOrCallable = Tool | Callable


async def run_agent_iteration(
    ctx: AnyContext,
    agent: "Agent[None, Any]",
    user_prompt: str,
    attachments: "list[UserContent] | None" = None,
    history_list: ListOfDict | None = None,
    rate_limitter: LLMRateLimitter | None = None,
    max_retry: int = 2,
    log_indent_level: int = 0,
) -> "AgentRun":
    """
    Runs a single iteration of the agent execution loop.

    Args:
        ctx: The task context.
        agent: The Pydantic AI agent instance.
        user_prompt: The user's input prompt.
        history_list: The current conversation history.

    Returns:
        The agent run result object.

    Raises:
        Exception: If any error occurs during agent execution.
    """
    if max_retry < 0:
        raise ValueError("Max retry cannot be less than 0")
    attempt = 0
    while attempt < max_retry:
        try:
            return await _run_single_agent_iteration(
                ctx=ctx,
                agent=agent,
                user_prompt=user_prompt,
                attachments=[] if attachments is None else attachments,
                history_list=[] if history_list is None else history_list,
                rate_limitter=(
                    llm_rate_limitter if rate_limitter is None else rate_limitter
                ),
                log_indent_level=log_indent_level,
            )
        except BaseException:
            attempt += 1
            if attempt == max_retry:
                raise
    raise Exception("Max retry exceeded")


async def _run_single_agent_iteration(
    ctx: AnyContext,
    agent: "Agent",
    user_prompt: str,
    attachments: "list[UserContent]",
    history_list: ListOfDict,
    rate_limitter: LLMRateLimitter,
    log_indent_level: int,
) -> "AgentRun":
    from openai import APIError
    from pydantic_ai import UsageLimits
    from pydantic_ai.messages import ModelMessagesTypeAdapter

    agent_payload = _estimate_request_payload(
        agent, user_prompt, attachments, history_list
    )
    callback = _create_print_throttle_notif(ctx)
    if rate_limitter:
        await rate_limitter.throttle(agent_payload, callback)
    else:
        await llm_rate_limitter.throttle(agent_payload, callback)
    user_prompt_with_attachments = [user_prompt] + attachments
    async with agent:
        async with agent.iter(
            user_prompt=user_prompt_with_attachments,
            message_history=ModelMessagesTypeAdapter.validate_python(history_list),
            usage_limits=UsageLimits(request_limit=None),  # We don't want limit
        ) as agent_run:
            async for node in agent_run:
                # Each node represents a step in the agent's execution
                try:
                    await print_node(
                        _get_plain_printer(ctx), agent_run, node, log_indent_level
                    )
                except APIError as e:
                    # Extract detailed error information from the response
                    error_details = extract_api_error_details(e)
                    ctx.log_error(f"API Error: {error_details}")
                    raise
                except Exception as e:
                    ctx.log_error(f"Error processing node: {str(e)}")
                    ctx.log_error(f"Error type: {type(e).__name__}")
                    raise
            return agent_run


def _create_print_throttle_notif(ctx: AnyContext) -> Callable[[str], None]:
    def _print_throttle_notif(reason: str):
        ctx.print(stylize_faint(f" ⌛>> Request Throttled: {reason}"), plain=True)

    return _print_throttle_notif


def _estimate_request_payload(
    agent: "Agent",
    user_prompt: str,
    attachments: "list[UserContent]",
    history_list: ListOfDict,
) -> str:
    system_prompts = agent._system_prompts if hasattr(agent, "_system_prompts") else ()
    return json.dumps(
        [
            {"role": "system", "content": "\n".join(system_prompts)},
            *history_list,
            {"role": "user", "content": user_prompt},
            *[_estimate_attachment_payload(attachment) for attachment in attachments],
        ]
    )


def _estimate_attachment_payload(attachment: "UserContent") -> Any:
    if hasattr(attachment, "url"):
        return {"role": "user", "content": attachment.url}
    if hasattr(attachment, "data"):
        return {"role": "user", "content": "x" * len(attachment.data)}
    return ""


def _get_plain_printer(ctx: AnyContext):
    def printer(*args, **kwargs):
        if "plain" not in kwargs:
            kwargs["plain"] = True
        return ctx.print(*args, **kwargs)

    return printer
```
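For orientation, here is a minimal sketch of how a caller might drive the new `run_agent_iteration` helper. It is not part of the release: `ctx` and `agent` are assumed to come from zrb's usual task machinery, and the `result` attribute follows pydantic-ai's `AgentRun` API, which may differ slightly between pydantic-ai versions.

```python
# Illustrative sketch only (not part of the diff). Assumes `ctx` is a running
# task's AnyContext and `agent` is a pydantic-ai Agent built elsewhere in zrb.
from zrb.task.llm.agent_runner import run_agent_iteration


async def ask_once(ctx, agent, question: str):
    agent_run = await run_agent_iteration(
        ctx=ctx,
        agent=agent,
        user_prompt=question,
        history_list=[],  # start from an empty conversation
        max_retry=2,      # retry transient failures once before giving up
    )
    # After the iteration loop completes, the run object carries the final
    # result (a pydantic-ai AgentRunResult) on `agent_run.result`.
    return agent_run.result
```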
zrb/task/llm/config.py
CHANGED
```diff
@@ -1,25 +1,45 @@
-from typing import TYPE_CHECKING,
+from typing import TYPE_CHECKING, Callable
 
 if TYPE_CHECKING:
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
-else:
-    Model = Any
-    ModelSettings = Any
 
-from zrb.attr.type import StrAttr,
+from zrb.attr.type import BoolAttr, StrAttr, StrListAttr
+from zrb.config.llm_config import LLMConfig, llm_config
 from zrb.context.any_context import AnyContext
-from zrb.
-
-
+from zrb.util.attr import get_attr, get_bool_attr, get_str_list_attr
+
+
+def get_yolo_mode(
+    ctx: AnyContext,
+    yolo_mode_attr: (
+        Callable[[AnyContext], list[str] | bool | None] | StrListAttr | BoolAttr | None
+    ) = None,
+    render_yolo_mode: bool = True,
+) -> bool | list[str]:
+    if yolo_mode_attr is None:
+        return llm_config.default_yolo_mode
+    try:
+        return get_bool_attr(
+            ctx,
+            yolo_mode_attr,
+            False,
+            auto_render=render_yolo_mode,
+        )
+    except Exception:
+        return get_str_list_attr(
+            ctx,
+            yolo_mode_attr,
+            auto_render=render_yolo_mode,
+        )
 
 
 def get_model_settings(
     ctx: AnyContext,
     model_settings_attr: (
-        ModelSettings | Callable[[
+        "ModelSettings | Callable[[AnyContext], ModelSettings] | None"
     ) = None,
-) -> ModelSettings | None:
+) -> "ModelSettings | None":
     """Gets the model settings, resolving callables if necessary."""
     model_settings = get_attr(ctx, model_settings_attr, None, auto_render=False)
     if model_settings is None:
@@ -50,7 +70,7 @@ def get_model_api_key(
 ) -> str | None:
     """Gets the model API key, rendering if configured."""
     api_key = get_attr(ctx, model_api_key_attr, None, auto_render=render_model_api_key)
-    if api_key is None and llm_config.
+    if api_key is None and llm_config.default_model_api_key is not None:
         return llm_config.default_model_api_key
     if isinstance(api_key, str) or api_key is None:
         return api_key
@@ -59,18 +79,21 @@ def get_model_api_key(
 
 def get_model(
     ctx: AnyContext,
-    model_attr: Callable[[
+    model_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model: bool,
-    model_base_url_attr:
+    model_base_url_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model_base_url: bool = True,
-    model_api_key_attr:
+    model_api_key_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None" = None,
     render_model_api_key: bool = True,
-
+    is_small_model: bool = False,
+) -> "str | Model":
     """Gets the model instance or name, handling defaults and configuration."""
     from pydantic_ai.models import Model
 
     model = get_attr(ctx, model_attr, None, auto_render=render_model)
     if model is None:
+        if is_small_model:
+            return llm_config.default_small_model
         return llm_config.default_model
     if isinstance(model, str):
         model_base_url = get_model_base_url(
@@ -79,11 +102,11 @@ def get_model(
     model_api_key = get_model_api_key(ctx, model_api_key_attr, render_model_api_key)
     new_llm_config = LLMConfig(
         default_model_name=model,
-
-
+        default_model_base_url=model_base_url,
+        default_model_api_key=model_api_key,
     )
     if model_base_url is None and model_api_key is None:
-        default_model_provider =
+        default_model_provider = _get_default_model_provider(is_small_model)
         if default_model_provider is not None:
             new_llm_config.set_default_model_provider(default_model_provider)
         return new_llm_config.default_model
@@ -91,3 +114,9 @@ def get_model(
     if isinstance(model, Model):
         return model
     raise ValueError(f"Invalid model type resolved: {type(model)}, value: {model}")
+
+
+def _get_default_model_provider(is_small_model: bool = False):
+    if is_small_model:
+        return llm_config.default_small_model_provider
+    return llm_config.default_model_provider
```
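The new `get_yolo_mode` helper accepts either a boolean or a list of tool names and falls back to `llm_config.default_yolo_mode` when nothing is set. A rough sketch of how it might be called follows; the `ctx` object and the tool name in the list are assumptions for illustration, not values from the diff.

```python
# Illustrative sketch of the resolution order implemented above, not part of the diff.
from zrb.task.llm.config import get_yolo_mode


def resolve_yolo(ctx):
    # No attribute given: fall back to llm_config.default_yolo_mode.
    default = get_yolo_mode(ctx)
    # Boolean attribute: resolved through get_bool_attr.
    allow_everything = get_yolo_mode(ctx, yolo_mode_attr=True)
    # List attribute: get_bool_attr fails on it, so the except branch falls
    # through to get_str_list_attr and yields a per-tool allow-list.
    allow_some = get_yolo_mode(ctx, yolo_mode_attr=["read_from_file"])
    return default, allow_everything, allow_some
```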
@@ -0,0 +1,209 @@ (new file: zrb/task/llm/conversation_history.py)

```python
import json
import os
from collections.abc import Callable
from typing import Any

from zrb.attr.type import StrAttr
from zrb.config.llm_context.config import llm_context_config
from zrb.context.any_context import AnyContext
from zrb.task.llm.conversation_history_model import ConversationHistory
from zrb.task.llm.typing import ListOfDict
from zrb.util.attr import get_str_attr
from zrb.util.file import read_file, write_file
from zrb.util.markdown import make_markdown_section
from zrb.util.run import run_async
from zrb.xcom.xcom import Xcom


def _get_global_subagent_messages_xcom(ctx: AnyContext) -> Xcom:
    if "_global_subagents" not in ctx.xcom:
        ctx.xcom["_global_subagents"] = Xcom([{}])
    if not isinstance(ctx.xcom["_global_subagents"], Xcom):
        raise ValueError("ctx.xcom._global_subagents must be an Xcom")
    return ctx.xcom["_global_subagents"]


def inject_subagent_history_into_ctx(
    ctx: AnyContext, conversation_history: ConversationHistory
):
    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
    existing_subagent_history = subagent_messages_xcom.get({})
    subagent_messages_xcom.set(
        {**existing_subagent_history, **conversation_history.subagent_history}
    )


def set_ctx_subagent_history(ctx: AnyContext, subagent_name: str, messages: ListOfDict):
    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
    subagent_history = subagent_messages_xcom.get({})
    subagent_history[subagent_name] = messages
    subagent_messages_xcom.set(subagent_history)


def get_subagent_histories_from_ctx(ctx: AnyContext) -> dict[str, ListOfDict]:
    subagent_messsages_xcom = _get_global_subagent_messages_xcom(ctx)
    return subagent_messsages_xcom.get({})


def inject_conversation_history_notes(conversation_history: ConversationHistory):
    conversation_history.long_term_note = _fetch_long_term_note(
        conversation_history.project_path
    )
    conversation_history.contextual_note = _fetch_contextual_note(
        conversation_history.project_path
    )


def _fetch_long_term_note(project_path: str) -> str:
    contexts = llm_context_config.get_notes(cwd=project_path)
    return contexts.get("/", "")


def _fetch_contextual_note(project_path: str) -> str:
    contexts = llm_context_config.get_notes(cwd=project_path)
    return "\n".join(
        [
            make_markdown_section(header, content)
            for header, content in contexts.items()
            if header != "/"
        ]
    )


def get_history_file(
    ctx: AnyContext,
    conversation_history_file_attr: StrAttr | None,
    render_history_file: bool,
) -> str:
    """Gets the path to the conversation history file, rendering if configured."""
    return get_str_attr(
        ctx,
        conversation_history_file_attr,
        "",
        auto_render=render_history_file,
    )


async def _read_from_source(
    ctx: AnyContext,
    reader: (
        Callable[[AnyContext], ConversationHistory | dict[str, Any] | list | None]
        | None
    ),
    file_path: str | None,
) -> "ConversationHistory | None":
    # Priority 1: Reader function
    if reader:
        try:
            raw_data = await run_async(reader(ctx))
            if raw_data:
                instance = ConversationHistory.parse_and_validate(
                    ctx, raw_data, "reader"
                )
                if instance:
                    return instance
        except Exception as e:
            ctx.log_warning(
                f"Error executing conversation history reader: {e}. Ignoring."
            )
    # Priority 2: History file
    if file_path and os.path.isfile(file_path):
        try:
            content = read_file(file_path)
            raw_data = json.loads(content)
            instance = ConversationHistory.parse_and_validate(
                ctx, raw_data, f"file '{file_path}'"
            )
            if instance:
                return instance
        except json.JSONDecodeError:
            ctx.log_warning(
                f"Could not decode JSON from history file '{file_path}'. "
                "Ignoring file content."
            )
        except Exception as e:
            ctx.log_warning(
                f"Error reading history file '{file_path}': {e}. "
                "Ignoring file content."
            )
    # Fallback: Return default value
    return None


async def read_conversation_history(
    ctx: AnyContext,
    conversation_history_reader: (
        Callable[[AnyContext], ConversationHistory | dict | list | None] | None
    ),
    conversation_history_file_attr: StrAttr | None,
    render_history_file: bool,
    conversation_history_attr: (
        ConversationHistory
        | Callable[[AnyContext], ConversationHistory | dict | list]
        | dict
        | list
    ),
) -> ConversationHistory:
    """Reads conversation history from reader, file, or attribute, with validation."""
    history_file = get_history_file(
        ctx, conversation_history_file_attr, render_history_file
    )
    # Use the class method defined above
    history_data = await _read_from_source(
        ctx=ctx,
        reader=conversation_history_reader,
        file_path=history_file,
    )
    if history_data:
        return history_data
    # Priority 3: Callable or direct conversation_history attribute
    raw_data_attr: Any = None
    if callable(conversation_history_attr):
        try:
            raw_data_attr = await run_async(conversation_history_attr(ctx))
        except Exception as e:
            ctx.log_warning(
                f"Error executing callable conversation_history attribute: {e}. "
                "Ignoring."
            )
    if raw_data_attr is None:
        raw_data_attr = conversation_history_attr
    if raw_data_attr:
        # Use the class method defined above
        history_data = ConversationHistory.parse_and_validate(
            ctx, raw_data_attr, "attribute"
        )
        if history_data:
            return history_data
    # Fallback: Return default value
    return ConversationHistory()


async def write_conversation_history(
    ctx: AnyContext,
    history_data: ConversationHistory,
    conversation_history_writer: (
        Callable[[AnyContext, ConversationHistory], None] | None
    ),
    conversation_history_file_attr: StrAttr | None,
    render_history_file: bool,
):
    """Writes conversation history using the writer or to a file."""
    if conversation_history_writer is not None:
        await run_async(conversation_history_writer(ctx, history_data))
    history_file = get_history_file(
        ctx, conversation_history_file_attr, render_history_file
    )
    if history_file != "":
        write_file(history_file, json.dumps(history_data.to_dict(), indent=2))


def count_part_in_history_list(history_list: ListOfDict) -> int:
    """Calculates the total number of 'parts' in a history list."""
    history_part_len = 0
    for history in history_list:
        if "parts" in history:
            history_part_len += len(history["parts"])
        else:
            history_part_len += 1
    return history_part_len
```
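To see how these helpers fit together, here is a plausible read/inspect/write round trip. It is a sketch only: the history file path is hypothetical, `ctx` is assumed to be a running task's context, and real call sites presumably pass these values through `LLMTask` attributes.

```python
# Sketch only, not part of the diff. The file path below is hypothetical and
# `ctx` is assumed to be an AnyContext from a running zrb task.
from zrb.task.llm.conversation_history import (
    count_part_in_history_list,
    read_conversation_history,
    write_conversation_history,
)

HISTORY_FILE = "./llm-history.json"  # hypothetical path, for illustration


async def inspect_history(ctx):
    history = await read_conversation_history(
        ctx,
        conversation_history_reader=None,
        conversation_history_file_attr=HISTORY_FILE,
        render_history_file=True,
        conversation_history_attr=[],
    )
    ctx.print(f"parts so far: {count_part_in_history_list(history.history)}")
    # Write the (unchanged) history back out through the same attribute plumbing.
    await write_conversation_history(
        ctx,
        history,
        conversation_history_writer=None,
        conversation_history_file_attr=HISTORY_FILE,
        render_history_file=True,
    )
```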
@@ -0,0 +1,67 @@ (new file: zrb/task/llm/conversation_history_model.py)

```python
import json
import os
from typing import Any

from zrb.context.any_context import AnyContext
from zrb.task.llm.typing import ListOfDict


class ConversationHistory:

    def __init__(
        self,
        history: ListOfDict | None = None,
        contextual_note: str | None = None,
        long_term_note: str | None = None,
        project_path: str | None = None,
        subagent_history: dict[str, ListOfDict] | None = None,
    ):
        self.history = history if history is not None else []
        self.contextual_note = contextual_note if contextual_note is not None else ""
        self.long_term_note = long_term_note if long_term_note is not None else ""
        self.project_path = project_path if project_path is not None else os.getcwd()
        self.subagent_history = subagent_history if subagent_history is not None else {}

    def to_dict(self) -> dict[str, Any]:
        return {
            "history": self.history,
            "contextual_note": self.contextual_note,
            "long_term_note": self.long_term_note,
            "subagent_history": self.subagent_history,
        }

    def model_dump_json(self, indent: int = 2) -> str:
        return json.dumps(self.to_dict(), indent=indent)

    @classmethod
    def parse_and_validate(
        cls, ctx: AnyContext, data: Any, source: str
    ) -> "ConversationHistory":
        try:
            if isinstance(data, cls):
                return data  # Already a valid instance
            if isinstance(data, dict):
                return cls(
                    history=data.get("history", data.get("messages", [])),
                    contextual_note=data.get("contextual_note", ""),
                    long_term_note=data.get("long_term_note", ""),
                    subagent_history=data.get("subagent_history", {}),
                )
            elif isinstance(data, list):
                # Handle very old format (just a list) - wrap it
                ctx.log_warning(
                    f"History from {source} contains legacy list format. "
                    "Wrapping it into the new structure. "
                    "Consider updating the source format."
                )
                return cls(history=data)
            else:
                ctx.log_warning(
                    f"History data from {source} has unexpected format "
                    f"(type: {type(data)}). Ignoring."
                )
        except Exception as e:  # Catch validation errors too
            ctx.log_warning(
                f"Error validating/parsing history data from {source}: {e}. Ignoring."
            )
        return cls()
```
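A short sketch of the three input shapes `parse_and_validate` accepts, per the class above. The payload values are illustrative, and `ctx` is assumed to be any object exposing a `log_warning` method, such as a zrb task context.

```python
# Sketch only, not part of the diff. `ctx` is assumed to expose log_warning().
from zrb.task.llm.conversation_history_model import ConversationHistory


def demo_parse(ctx):
    # New-style dict payload: known fields are picked out individually.
    from_dict = ConversationHistory.parse_and_validate(
        ctx, {"history": [], "long_term_note": "example note"}, "file 'history.json'"
    )
    # Legacy list payload: wrapped into the new structure, with a warning.
    from_list = ConversationHistory.parse_and_validate(ctx, [], "attribute")
    # Anything else: a warning is logged and an empty history is returned.
    fallback = ConversationHistory.parse_and_validate(ctx, 42, "reader")
    return from_dict, from_list, fallback
```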
@@ -0,0 +1,41 @@ (new file: zrb/task/llm/default_workflow/coding/workflow.md)

```markdown
---
description: "A comprehensive workflow for software engineering tasks, including writing, modifying, and debugging code, as well as creating new applications. ALWAYS activate this workflow whenever you need to deal with software engineering tasks."
---

This workflow provides a structured approach to software engineering tasks. Adhere to these guidelines to deliver high-quality, idiomatic code that respects the project's existing patterns and conventions.

# Workflow Loading Strategy

This is a general-purpose coding workflow. For tasks involving specific languages or tools, you **MUST** load the relevant specialized workflows.

- **If the task involves Python:** Load the `python` workflow.
- **If the task involves Git:** Load the `git` workflow.
- **If the task involves shell scripting:** Load the `shell` workflow.
- **If the task involves Go:** Load the `golang` workflow.
- **If the task involves Java:** Load the `java` workflow.
- **If the task involves Javascript/Typescript:** Load the `javascript` workflow.
- **If the task involves HTML/CSS:** Load the `html-css` workflow.
- **If the task involves Rust:** Load the `rust` workflow.

Always consider if a more specific workflow is available and appropriate for the task at hand.

# Core Mandates

- **Conventions:** Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code, tests, and configuration first.
- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, configuration files like 'package.json', 'Cargo.toml', 'requirements.txt', 'build.gradle', etc., or observe neighboring files) before employing it.
- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project.
- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically.
- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add high-value comments if necessary for clarity or if requested by the user. Do not edit comments that are separate from the code you are changing. *NEVER* talk to the user or describe your changes through comments.
- **Proactiveness:** Fulfill the user's request thoroughly. When adding features or fixing bugs, this includes adding tests to ensure quality. Consider all created files, especially tests, to be permanent artifacts unless the user says otherwise.
- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it.
- **Explaining Changes:** After completing a code modification or file operation *do not* provide summaries unless asked.
- **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes.

# Software Engineering Tasks
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence:
1. **Understand & Strategize:** Think about the user's request and the relevant codebase context. When the task involves **complex refactoring, codebase exploration or system-wide analysis**, your **first and primary tool** must be 'codebase_investigator'. Use it to build a comprehensive understanding of the code, its structure, and dependencies. For **simple, targeted searches** (like finding a specific function name, file path, or variable declaration), you should use 'search_file_content' or 'glob' directly.
2. **Plan:** Build a coherent and grounded (based on the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should use an iterative development process that includes writing unit tests to verify your changes. Use output logs or debug statements as part of this process to arrive at a solution.
3. **Implement:** Use the available tools (e.g., 'replace_in_file', 'write_to_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
4. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
5. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
6. **Finalize:** After all verification passes, consider the task complete. Do not remove or revert any changes or created files (like tests). Await the user's next instruction.
```
@@ -0,0 +1,68 @@ (new file: zrb/task/llm/default_workflow/copywriting/workflow.md)

```markdown
---
description: "A workflow for creating, refining, and organizing textual content."
---
Follow this workflow to produce content that is not just correct, but compelling, clear, and perfectly suited to its purpose.

# Core Mandates

- **Audience-First:** Always understand who you're writing for and what they need to know
- **Purpose-Driven:** Every piece of content must serve a clear objective
- **Quality Standards:** Deliver polished, professional content that meets the highest standards
- **Iterative Refinement:** Content improves through multiple rounds of review and editing

# Tool Usage Guideline
- Use `read_from_file` to analyze existing content and style guides
- Use `write_to_file` for creating new content drafts
- Use `replace_in_file` for targeted edits and refinements

# Step 1: Understand Intent and Audience

1. **Define the Goal:** What is this text supposed to achieve? (e.g., persuade, inform, entertain, sell). If the user is vague, ask for clarification.
2. **Identify the Audience:** Who are you writing for? (e.g., experts, beginners, customers). This dictates your tone, vocabulary, and level of detail.
3. **Determine the Tone:** Choose a voice that serves the goal and resonates with the audience (e.g., formal, witty, technical, urgent).
4. **Analyze Existing Content:** Review any provided examples, style guides, or reference materials to understand established patterns.

# Step 2: Plan and Outline

1. **Create Logical Structure:** Develop an outline that flows naturally from introduction to conclusion
2. **Key Sections:** Identify main talking points and supporting arguments
3. **Call-to-Action:** Define what you want the reader to do or think after reading
4. **Get Approval:** Present the outline to the user for confirmation before proceeding

# Step 3: Draft with Purpose

1. **Hook the Reader:** Start with a strong opening that grabs attention
2. **Use Active Voice:** Make your writing direct and energetic
3. **Show, Don't Just Tell:** Use examples, stories, and data to illustrate your points
4. **Maintain Consistency:** Stick to the established tone and style throughout

# Step 4: Refine and Polish

1. **Read Aloud:** Catch awkward phrasing and grammatical errors
2. **Cut Mercilessly:** Remove anything that doesn't serve the goal
3. **Enhance Readability:** Use short paragraphs, headings, bullet points, and bold text
4. **Verify Accuracy:** Ensure all facts, figures, and claims are correct

# Step 5: Task-Specific Execution

## Summarization
- Distill the essence while preserving key information
- Be objective and ruthless in cutting fluff
- Maintain the original meaning and context

## Proofreading
- Correct grammar, spelling, and punctuation
- Improve sentence flow and clarity
- Preserve the original meaning and voice

## Refining/Editing
- Sharpen the author's message
- Strengthen arguments and improve clarity
- Ensure consistent tone while respecting the original voice

# Step 6: Finalize and Deliver

- Present the final content to the user
- Be prepared to make additional refinements based on feedback
- Ensure the content meets all stated objectives
- Confirm the content is ready for its intended use
```