zrb 1.15.3__py3-none-any.whl → 1.21.29__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
- zrb/__init__.py +2 -6
- zrb/attr/type.py +10 -7
- zrb/builtin/__init__.py +2 -0
- zrb/builtin/git.py +12 -1
- zrb/builtin/group.py +31 -15
- zrb/builtin/llm/attachment.py +40 -0
- zrb/builtin/llm/chat_completion.py +274 -0
- zrb/builtin/llm/chat_session.py +126 -167
- zrb/builtin/llm/chat_session_cmd.py +288 -0
- zrb/builtin/llm/chat_trigger.py +79 -0
- zrb/builtin/llm/history.py +4 -4
- zrb/builtin/llm/llm_ask.py +217 -135
- zrb/builtin/llm/tool/api.py +74 -70
- zrb/builtin/llm/tool/cli.py +35 -21
- zrb/builtin/llm/tool/code.py +55 -73
- zrb/builtin/llm/tool/file.py +278 -344
- zrb/builtin/llm/tool/note.py +84 -0
- zrb/builtin/llm/tool/rag.py +27 -34
- zrb/builtin/llm/tool/sub_agent.py +54 -41
- zrb/builtin/llm/tool/web.py +74 -98
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
- zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
- zrb/builtin/searxng/config/settings.yml +5671 -0
- zrb/builtin/searxng/start.py +21 -0
- zrb/builtin/shell/autocomplete/bash.py +4 -3
- zrb/builtin/shell/autocomplete/zsh.py +4 -3
- zrb/config/config.py +202 -27
- zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
- zrb/config/default_prompt/interactive_system_prompt.md +24 -30
- zrb/config/default_prompt/persona.md +1 -1
- zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
- zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
- zrb/config/default_prompt/summarization_prompt.md +57 -16
- zrb/config/default_prompt/system_prompt.md +36 -30
- zrb/config/llm_config.py +119 -23
- zrb/config/llm_context/config.py +127 -90
- zrb/config/llm_context/config_parser.py +1 -7
- zrb/config/llm_context/workflow.py +81 -0
- zrb/config/llm_rate_limitter.py +100 -47
- zrb/context/any_shared_context.py +7 -1
- zrb/context/context.py +8 -2
- zrb/context/shared_context.py +3 -7
- zrb/group/any_group.py +3 -3
- zrb/group/group.py +3 -3
- zrb/input/any_input.py +5 -1
- zrb/input/base_input.py +18 -6
- zrb/input/option_input.py +13 -1
- zrb/input/text_input.py +7 -24
- zrb/runner/cli.py +21 -20
- zrb/runner/common_util.py +24 -19
- zrb/runner/web_route/task_input_api_route.py +5 -5
- zrb/runner/web_util/user.py +7 -3
- zrb/session/any_session.py +12 -6
- zrb/session/session.py +39 -18
- zrb/task/any_task.py +24 -3
- zrb/task/base/context.py +17 -9
- zrb/task/base/execution.py +15 -8
- zrb/task/base/lifecycle.py +8 -4
- zrb/task/base/monitoring.py +12 -7
- zrb/task/base_task.py +69 -5
- zrb/task/base_trigger.py +12 -5
- zrb/task/llm/agent.py +128 -167
- zrb/task/llm/agent_runner.py +152 -0
- zrb/task/llm/config.py +39 -20
- zrb/task/llm/conversation_history.py +110 -29
- zrb/task/llm/conversation_history_model.py +4 -179
- zrb/task/llm/default_workflow/coding/workflow.md +41 -0
- zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
- zrb/task/llm/default_workflow/git/workflow.md +118 -0
- zrb/task/llm/default_workflow/golang/workflow.md +128 -0
- zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
- zrb/task/llm/default_workflow/java/workflow.md +146 -0
- zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
- zrb/task/llm/default_workflow/python/workflow.md +160 -0
- zrb/task/llm/default_workflow/researching/workflow.md +153 -0
- zrb/task/llm/default_workflow/rust/workflow.md +162 -0
- zrb/task/llm/default_workflow/shell/workflow.md +299 -0
- zrb/task/llm/file_replacement.py +206 -0
- zrb/task/llm/file_tool_model.py +57 -0
- zrb/task/llm/history_processor.py +206 -0
- zrb/task/llm/history_summarization.py +2 -193
- zrb/task/llm/print_node.py +184 -64
- zrb/task/llm/prompt.py +175 -179
- zrb/task/llm/subagent_conversation_history.py +41 -0
- zrb/task/llm/tool_wrapper.py +226 -85
- zrb/task/llm/workflow.py +76 -0
- zrb/task/llm_task.py +109 -71
- zrb/task/make_task.py +2 -3
- zrb/task/rsync_task.py +25 -10
- zrb/task/scheduler.py +4 -4
- zrb/util/attr.py +54 -39
- zrb/util/cli/markdown.py +12 -0
- zrb/util/cli/text.py +30 -0
- zrb/util/file.py +12 -3
- zrb/util/git.py +2 -2
- zrb/util/{llm/prompt.py → markdown.py} +2 -3
- zrb/util/string/conversion.py +1 -1
- zrb/util/truncate.py +23 -0
- zrb/util/yaml.py +204 -0
- zrb/xcom/xcom.py +10 -0
- {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/METADATA +38 -18
- {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/RECORD +105 -79
- {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/WHEEL +1 -1
- zrb/task/llm/default_workflow/coding.md +0 -24
- zrb/task/llm/default_workflow/copywriting.md +0 -17
- zrb/task/llm/default_workflow/researching.md +0 -18
- {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/entry_points.txt +0 -0
zrb/task/llm/agent_runner.py ADDED

```diff
@@ -0,0 +1,152 @@
+import json
+from collections.abc import Callable
+from typing import TYPE_CHECKING, Any
+
+from zrb.config.llm_rate_limitter import LLMRateLimitter, llm_rate_limitter
+from zrb.context.any_context import AnyContext
+from zrb.task.llm.error import extract_api_error_details
+from zrb.task.llm.print_node import print_node
+from zrb.task.llm.typing import ListOfDict
+from zrb.util.cli.style import stylize_faint
+
+if TYPE_CHECKING:
+    from pydantic_ai import Agent, Tool
+    from pydantic_ai.agent import AgentRun
+    from pydantic_ai.messages import UserContent
+
+    ToolOrCallable = Tool | Callable
+
+
+async def run_agent_iteration(
+    ctx: AnyContext,
+    agent: "Agent[None, Any]",
+    user_prompt: str,
+    attachments: "list[UserContent] | None" = None,
+    history_list: ListOfDict | None = None,
+    rate_limitter: LLMRateLimitter | None = None,
+    max_retry: int = 2,
+    log_indent_level: int = 0,
+) -> "AgentRun":
+    """
+    Runs a single iteration of the agent execution loop.
+
+    Args:
+        ctx: The task context.
+        agent: The Pydantic AI agent instance.
+        user_prompt: The user's input prompt.
+        history_list: The current conversation history.
+
+    Returns:
+        The agent run result object.
+
+    Raises:
+        Exception: If any error occurs during agent execution.
+    """
+    if max_retry < 0:
+        raise ValueError("Max retry cannot be less than 0")
+    attempt = 0
+    while attempt < max_retry:
+        try:
+            return await _run_single_agent_iteration(
+                ctx=ctx,
+                agent=agent,
+                user_prompt=user_prompt,
+                attachments=[] if attachments is None else attachments,
+                history_list=[] if history_list is None else history_list,
+                rate_limitter=(
+                    llm_rate_limitter if rate_limitter is None else rate_limitter
+                ),
+                log_indent_level=log_indent_level,
+            )
+        except BaseException:
+            attempt += 1
+            if attempt == max_retry:
+                raise
+    raise Exception("Max retry exceeded")
+
+
+async def _run_single_agent_iteration(
+    ctx: AnyContext,
+    agent: "Agent",
+    user_prompt: str,
+    attachments: "list[UserContent]",
+    history_list: ListOfDict,
+    rate_limitter: LLMRateLimitter,
+    log_indent_level: int,
+) -> "AgentRun":
+    from openai import APIError
+    from pydantic_ai import UsageLimits
+    from pydantic_ai.messages import ModelMessagesTypeAdapter
+
+    agent_payload = _estimate_request_payload(
+        agent, user_prompt, attachments, history_list
+    )
+    callback = _create_print_throttle_notif(ctx)
+    if rate_limitter:
+        await rate_limitter.throttle(agent_payload, callback)
+    else:
+        await llm_rate_limitter.throttle(agent_payload, callback)
+    user_prompt_with_attachments = [user_prompt] + attachments
+    async with agent:
+        async with agent.iter(
+            user_prompt=user_prompt_with_attachments,
+            message_history=ModelMessagesTypeAdapter.validate_python(history_list),
+            usage_limits=UsageLimits(request_limit=None),  # We don't want limit
+        ) as agent_run:
+            async for node in agent_run:
+                # Each node represents a step in the agent's execution
+                try:
+                    await print_node(
+                        _get_plain_printer(ctx), agent_run, node, log_indent_level
+                    )
+                except APIError as e:
+                    # Extract detailed error information from the response
+                    error_details = extract_api_error_details(e)
+                    ctx.log_error(f"API Error: {error_details}")
+                    raise
+                except Exception as e:
+                    ctx.log_error(f"Error processing node: {str(e)}")
+                    ctx.log_error(f"Error type: {type(e).__name__}")
+                    raise
+            return agent_run
+
+
+def _create_print_throttle_notif(ctx: AnyContext) -> Callable[[str], None]:
+    def _print_throttle_notif(reason: str):
+        ctx.print(stylize_faint(f" ⌛>> Request Throttled: {reason}"), plain=True)
+
+    return _print_throttle_notif
+
+
+def _estimate_request_payload(
+    agent: "Agent",
+    user_prompt: str,
+    attachments: "list[UserContent]",
+    history_list: ListOfDict,
+) -> str:
+    system_prompts = agent._system_prompts if hasattr(agent, "_system_prompts") else ()
+    return json.dumps(
+        [
+            {"role": "system", "content": "\n".join(system_prompts)},
+            *history_list,
+            {"role": "user", "content": user_prompt},
+            *[_estimate_attachment_payload(attachment) for attachment in attachments],
+        ]
+    )
+
+
+def _estimate_attachment_payload(attachment: "UserContent") -> Any:
+    if hasattr(attachment, "url"):
+        return {"role": "user", "content": attachment.url}
+    if hasattr(attachment, "data"):
+        return {"role": "user", "content": "x" * len(attachment.data)}
+    return ""
+
+
+def _get_plain_printer(ctx: AnyContext):
+    def printer(*args, **kwargs):
+        if "plain" not in kwargs:
+            kwargs["plain"] = True
+        return ctx.print(*args, **kwargs)
+
+    return printer
```
zrb/task/llm/config.py CHANGED

```diff
@@ -4,30 +4,40 @@ if TYPE_CHECKING:
     from pydantic_ai.models import Model
     from pydantic_ai.settings import ModelSettings
 
-from zrb.attr.type import BoolAttr, StrAttr,
+from zrb.attr.type import BoolAttr, StrAttr, StrListAttr
 from zrb.config.llm_config import LLMConfig, llm_config
 from zrb.context.any_context import AnyContext
-from zrb.
-from zrb.util.attr import get_attr, get_bool_attr
+from zrb.util.attr import get_attr, get_bool_attr, get_str_list_attr
 
 
-def
+def get_yolo_mode(
     ctx: AnyContext,
-
+    yolo_mode_attr: (
+        Callable[[AnyContext], list[str] | bool | None] | StrListAttr | BoolAttr | None
+    ) = None,
     render_yolo_mode: bool = True,
-):
-
-
-
-
-
-
+) -> bool | list[str]:
+    if yolo_mode_attr is None:
+        return llm_config.default_yolo_mode
+    try:
+        return get_bool_attr(
+            ctx,
+            yolo_mode_attr,
+            False,
+            auto_render=render_yolo_mode,
+        )
+    except Exception:
+        return get_str_list_attr(
+            ctx,
+            yolo_mode_attr,
+            auto_render=render_yolo_mode,
+        )
 
 
 def get_model_settings(
     ctx: AnyContext,
     model_settings_attr: (
-        "ModelSettings | Callable[[
+        "ModelSettings | Callable[[AnyContext], ModelSettings] | None"
    ) = None,
 ) -> "ModelSettings | None":
     """Gets the model settings, resolving callables if necessary."""
@@ -60,7 +70,7 @@ def get_model_api_key(
 ) -> str | None:
     """Gets the model API key, rendering if configured."""
     api_key = get_attr(ctx, model_api_key_attr, None, auto_render=render_model_api_key)
-    if api_key is None and llm_config.
+    if api_key is None and llm_config.default_model_api_key is not None:
         return llm_config.default_model_api_key
     if isinstance(api_key, str) or api_key is None:
         return api_key
@@ -69,18 +79,21 @@ def get_model_api_key(
 
 def get_model(
     ctx: AnyContext,
-    model_attr: "Callable[[
+    model_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model: bool,
-    model_base_url_attr:
+    model_base_url_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None",
     render_model_base_url: bool = True,
-    model_api_key_attr:
+    model_api_key_attr: "Callable[[AnyContext], Model | str | None] | Model | str | None" = None,
     render_model_api_key: bool = True,
+    is_small_model: bool = False,
 ) -> "str | Model":
     """Gets the model instance or name, handling defaults and configuration."""
     from pydantic_ai.models import Model
 
     model = get_attr(ctx, model_attr, None, auto_render=render_model)
     if model is None:
+        if is_small_model:
+            return llm_config.default_small_model
         return llm_config.default_model
     if isinstance(model, str):
         model_base_url = get_model_base_url(
@@ -89,11 +102,11 @@ def get_model(
         model_api_key = get_model_api_key(ctx, model_api_key_attr, render_model_api_key)
         new_llm_config = LLMConfig(
             default_model_name=model,
-
-
+            default_model_base_url=model_base_url,
+            default_model_api_key=model_api_key,
         )
         if model_base_url is None and model_api_key is None:
-            default_model_provider =
+            default_model_provider = _get_default_model_provider(is_small_model)
             if default_model_provider is not None:
                 new_llm_config.set_default_model_provider(default_model_provider)
         return new_llm_config.default_model
@@ -101,3 +114,9 @@ def get_model(
     if isinstance(model, Model):
         return model
     raise ValueError(f"Invalid model type resolved: {type(model)}, value: {model}")
+
+
+def _get_default_model_provider(is_small_model: bool = False):
+    if is_small_model:
+        return llm_config.default_small_model_provider
+    return llm_config.default_model_provider
```
zrb/task/llm/conversation_history.py CHANGED

```diff
@@ -1,16 +1,73 @@
 import json
+import os
 from collections.abc import Callable
-from copy import deepcopy
 from typing import Any
 
 from zrb.attr.type import StrAttr
+from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
-from zrb.context.any_shared_context import AnySharedContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.typing import ListOfDict
 from zrb.util.attr import get_str_attr
-from zrb.util.file import write_file
+from zrb.util.file import read_file, write_file
+from zrb.util.markdown import make_markdown_section
 from zrb.util.run import run_async
+from zrb.xcom.xcom import Xcom
+
+
+def _get_global_subagent_messages_xcom(ctx: AnyContext) -> Xcom:
+    if "_global_subagents" not in ctx.xcom:
+        ctx.xcom["_global_subagents"] = Xcom([{}])
+    if not isinstance(ctx.xcom["_global_subagents"], Xcom):
+        raise ValueError("ctx.xcom._global_subagents must be an Xcom")
+    return ctx.xcom["_global_subagents"]
+
+
+def inject_subagent_history_into_ctx(
+    ctx: AnyContext, conversation_history: ConversationHistory
+):
+    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
+    existing_subagent_history = subagent_messages_xcom.get({})
+    subagent_messages_xcom.set(
+        {**existing_subagent_history, **conversation_history.subagent_history}
+    )
+
+
+def set_ctx_subagent_history(ctx: AnyContext, subagent_name: str, messages: ListOfDict):
+    subagent_messages_xcom = _get_global_subagent_messages_xcom(ctx)
+    subagent_history = subagent_messages_xcom.get({})
+    subagent_history[subagent_name] = messages
+    subagent_messages_xcom.set(subagent_history)
+
+
+def get_subagent_histories_from_ctx(ctx: AnyContext) -> dict[str, ListOfDict]:
+    subagent_messsages_xcom = _get_global_subagent_messages_xcom(ctx)
+    return subagent_messsages_xcom.get({})
+
+
+def inject_conversation_history_notes(conversation_history: ConversationHistory):
+    conversation_history.long_term_note = _fetch_long_term_note(
+        conversation_history.project_path
+    )
+    conversation_history.contextual_note = _fetch_contextual_note(
+        conversation_history.project_path
+    )
+
+
+def _fetch_long_term_note(project_path: str) -> str:
+    contexts = llm_context_config.get_notes(cwd=project_path)
+    return contexts.get("/", "")
+
+
+def _fetch_contextual_note(project_path: str) -> str:
+    contexts = llm_context_config.get_notes(cwd=project_path)
+    return "\n".join(
+        [
+            make_markdown_section(header, content)
+            for header, content in contexts.items()
+            if header != "/"
+        ]
+    )
 
 
 def get_history_file(
@@ -27,16 +84,62 @@ def get_history_file(
     )
 
 
+async def _read_from_source(
+    ctx: AnyContext,
+    reader: (
+        Callable[[AnyContext], ConversationHistory | dict[str, Any] | list | None]
+        | None
+    ),
+    file_path: str | None,
+) -> "ConversationHistory | None":
+    # Priority 1: Reader function
+    if reader:
+        try:
+            raw_data = await run_async(reader(ctx))
+            if raw_data:
+                instance = ConversationHistory.parse_and_validate(
+                    ctx, raw_data, "reader"
+                )
+                if instance:
+                    return instance
+        except Exception as e:
+            ctx.log_warning(
+                f"Error executing conversation history reader: {e}. Ignoring."
+            )
+    # Priority 2: History file
+    if file_path and os.path.isfile(file_path):
+        try:
+            content = read_file(file_path)
+            raw_data = json.loads(content)
+            instance = ConversationHistory.parse_and_validate(
+                ctx, raw_data, f"file '{file_path}'"
+            )
+            if instance:
+                return instance
+        except json.JSONDecodeError:
+            ctx.log_warning(
+                f"Could not decode JSON from history file '{file_path}'. "
+                "Ignoring file content."
+            )
+        except Exception as e:
+            ctx.log_warning(
+                f"Error reading history file '{file_path}': {e}. "
+                "Ignoring file content."
+            )
+    # Fallback: Return default value
+    return None
+
+
 async def read_conversation_history(
     ctx: AnyContext,
     conversation_history_reader: (
-        Callable[[
+        Callable[[AnyContext], ConversationHistory | dict | list | None] | None
     ),
     conversation_history_file_attr: StrAttr | None,
     render_history_file: bool,
     conversation_history_attr: (
         ConversationHistory
-        | Callable[[
+        | Callable[[AnyContext], ConversationHistory | dict | list]
         | dict
         | list
     ),
@@ -46,7 +149,7 @@ async def read_conversation_history(
         ctx, conversation_history_file_attr, render_history_file
     )
     # Use the class method defined above
-    history_data = await
+    history_data = await _read_from_source(
         ctx=ctx,
         reader=conversation_history_reader,
         file_path=history_file,
@@ -80,7 +183,7 @@ async def write_conversation_history(
     ctx: AnyContext,
     history_data: ConversationHistory,
     conversation_history_writer: (
-        Callable[[
+        Callable[[AnyContext, ConversationHistory], None] | None
    ),
     conversation_history_file_attr: StrAttr | None,
     render_history_file: bool,
@@ -95,28 +198,6 @@ async def write_conversation_history(
         write_file(history_file, json.dumps(history_data.to_dict(), indent=2))
 
 
-def replace_system_prompt_in_history(
-    history_list: ListOfDict, replacement: str = "<main LLM system prompt>"
-) -> ListOfDict:
-    """
-    Returns a new history list where any part with part_kind 'system-prompt'
-    has its 'content' replaced with the given replacement string.
-    Args:
-        history: List of history items (each item is a dict with a 'parts' list).
-        replacement: The string to use in place of system-prompt content.
-
-    Returns:
-        A deep-copied list of history items with system-prompt content replaced.
-    """
-    new_history = deepcopy(history_list)
-    for item in new_history:
-        parts = item.get("parts", [])
-        for part in parts:
-            if part.get("part_kind") == "system-prompt":
-                part["content"] = replacement
-    return new_history
-
-
 def count_part_in_history_list(history_list: ListOfDict) -> int:
     """Calculates the total number of 'parts' in a history list."""
     history_part_len = 0
```
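The Xcom-backed store above gives every task in a session one shared map of sub-agent transcripts. A short hypothetical usage, assuming `ctx` is a live `AnyContext` inside a running task and the message dicts are placeholders:

```python
# Hypothetical usage of the new sub-agent history helpers.
from zrb.task.llm.conversation_history import (
    get_subagent_histories_from_ctx,
    set_ctx_subagent_history,
)


def remember_subagent_run(ctx):
    # Store the transcript a sub-agent produced, keyed by its name.
    set_ctx_subagent_history(
        ctx, "code_reviewer", [{"role": "user", "content": "review main.py"}]
    )
    # Read every stored sub-agent transcript back as one dict.
    histories = get_subagent_histories_from_ctx(ctx)
    assert "code_reviewer" in histories
```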
zrb/task/llm/conversation_history_model.py CHANGED

```diff
@@ -1,90 +1,38 @@
 import json
 import os
-from collections.abc import Callable
 from typing import Any
 
-from zrb.config.llm_context.config import llm_context_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.typing import ListOfDict
-from zrb.util.file import read_file
-from zrb.util.llm.prompt import make_prompt_section
-from zrb.util.run import run_async
 
 
 class ConversationHistory:
 
     def __init__(
         self,
-        past_conversation_summary: str = "",
-        past_conversation_transcript: str = "",
         history: ListOfDict | None = None,
         contextual_note: str | None = None,
         long_term_note: str | None = None,
         project_path: str | None = None,
+        subagent_history: dict[str, ListOfDict] | None = None,
     ):
-        self.past_conversation_transcript = past_conversation_transcript
-        self.past_conversation_summary = past_conversation_summary
         self.history = history if history is not None else []
         self.contextual_note = contextual_note if contextual_note is not None else ""
         self.long_term_note = long_term_note if long_term_note is not None else ""
         self.project_path = project_path if project_path is not None else os.getcwd()
+        self.subagent_history = subagent_history if subagent_history is not None else {}
 
     def to_dict(self) -> dict[str, Any]:
         return {
-            "past_conversation_summary": self.past_conversation_summary,
-            "past_conversation_transcript": self.past_conversation_transcript,
             "history": self.history,
             "contextual_note": self.contextual_note,
             "long_term_note": self.long_term_note,
+            "subagent_history": self.subagent_history,
         }
 
     def model_dump_json(self, indent: int = 2) -> str:
         return json.dumps(self.to_dict(), indent=indent)
 
-    @classmethod
-    async def read_from_source(
-        cls,
-        ctx: AnyContext,
-        reader: Callable[[AnyContext], dict[str, Any] | list | None] | None,
-        file_path: str | None,
-    ) -> "ConversationHistory | None":
-        # Priority 1: Reader function
-        if reader:
-            try:
-                raw_data = await run_async(reader(ctx))
-                if raw_data:
-                    instance = cls.parse_and_validate(ctx, raw_data, "reader")
-                    if instance:
-                        return instance
-            except Exception as e:
-                ctx.log_warning(
-                    f"Error executing conversation history reader: {e}. Ignoring."
-                )
-        # Priority 2: History file
-        if file_path and os.path.isfile(file_path):
-            try:
-                content = read_file(file_path)
-                raw_data = json.loads(content)
-                instance = cls.parse_and_validate(ctx, raw_data, f"file '{file_path}'")
-                if instance:
-                    return instance
-            except json.JSONDecodeError:
-                ctx.log_warning(
-                    f"Could not decode JSON from history file '{file_path}'. "
-                    "Ignoring file content."
-                )
-            except Exception as e:
-                ctx.log_warning(
-                    f"Error reading history file '{file_path}': {e}. "
-                    "Ignoring file content."
-                )
-        # Fallback: Return default value
-        return None
-
-    def fetch_newest_notes(self):
-        self._fetch_long_term_note()
-        self._fetch_contextual_note()
-
     @classmethod
     def parse_and_validate(
         cls, ctx: AnyContext, data: Any, source: str
@@ -93,15 +41,11 @@ class ConversationHistory:
         if isinstance(data, cls):
             return data  # Already a valid instance
         if isinstance(data, dict):
-            # This handles both the new format and the old {'context': ..., 'history': ...}
             return cls(
-                past_conversation_summary=data.get("past_conversation_summary", ""),
-                past_conversation_transcript=data.get(
-                    "past_conversation_transcript", ""
-                ),
                 history=data.get("history", data.get("messages", [])),
                 contextual_note=data.get("contextual_note", ""),
                 long_term_note=data.get("long_term_note", ""),
+                subagent_history=data.get("subagent_history", {}),
             )
         elif isinstance(data, list):
             # Handle very old format (just a list) - wrap it
@@ -121,122 +65,3 @@ class ConversationHistory:
                 f"Error validating/parsing history data from {source}: {e}. Ignoring."
             )
             return cls()
-
-    def write_past_conversation_summary(self, past_conversation_summary: str):
-        """
-        Write or update the past conversation summary.
-
-        Use this tool to store or update a summary of previous conversations for
-        future reference. This is useful for providing context to LLMs or other tools
-        that need a concise history.
-
-        Args:
-            past_conversation_summary (str): The summary text to store.
-
-        Returns:
-            str: A JSON object indicating the success or failure of the operation.
-
-        Raises:
-            Exception: If the summary cannot be written.
-        """
-        self.past_conversation_summary = past_conversation_summary
-        return json.dumps({"success": True})
-
-    def write_past_conversation_transcript(self, past_conversation_transcript: str):
-        """
-        Write or update the past conversation transcript.
-
-        Use this tool to store or update the full transcript of previous conversations.
-        This is useful for providing detailed context to LLMs or for record-keeping.
-
-        Args:
-            past_conversation_transcript (str): The transcript text to store.
-
-        Returns:
-            str: A JSON object indicating the success or failure of the operation.
-
-        Raises:
-            Exception: If the transcript cannot be written.
-        """
-        self.past_conversation_transcript = past_conversation_transcript
-        return json.dumps({"success": True})
-
-    def read_long_term_note(self) -> str:
-        """
-        Read the content of the long-term references.
-
-        This tool helps you retrieve knowledge or notes stored for long-term reference.
-        If the note does not exist, you may want to create it using the write tool.
-
-        Returns:
-            str: JSON with content of the notes.
-
-        Raises:
-            Exception: If the note cannot be read.
-        """
-        return json.dumps({"content": self._fetch_long_term_note()})
-
-    def write_long_term_note(self, content: str) -> str:
-        """
-        Write the entire content of the long-term references.
-        This will overwrite any existing long-term notes.
-
-        Args:
-            content (str): The full content of the long-term notes.
-
-        Returns:
-            str: JSON indicating success.
-        """
-        llm_context_config.write_context(content, context_path="/")
-        return json.dumps({"success": True})
-
-    def read_contextual_note(self) -> str:
-        """
-        Read the content of the contextual references for the current project.
-
-        This tool helps you retrieve knowledge or notes stored for contextual reference.
-        If the note does not exist, you may want to create it using the write tool.
-
-        Returns:
-            str: JSON with content of the notes.
-
-        Raises:
-            Exception: If the note cannot be read.
-        """
-        return json.dumps({"content": self._fetch_contextual_note()})
-
-    def write_contextual_note(
-        self, content: str, context_path: str | None = None
-    ) -> str:
-        """
-        Write the entire content of the contextual references for a specific path.
-        This will overwrite any existing contextual notes for that path.
-
-        Args:
-            content (str): The full content of the contextual notes.
-            context_path (str, optional): The directory path for the context.
-                Defaults to the current project path.
-
-        Returns:
-            str: JSON indicating success.
-        """
-        if context_path is None:
-            context_path = self.project_path
-        llm_context_config.write_context(content, context_path=context_path)
-        return json.dumps({"success": True})
-
-    def _fetch_long_term_note(self):
-        contexts = llm_context_config.get_contexts(cwd=self.project_path)
-        self.long_term_note = contexts.get("/", "")
-        return self.long_term_note
-
-    def _fetch_contextual_note(self):
-        contexts = llm_context_config.get_contexts(cwd=self.project_path)
-        self.contextual_note = "\n".join(
-            [
-                make_prompt_section(header, content)
-                for header, content in contexts.items()
-                if header != "/"
-            ]
-        )
-        return self.contextual_note
```
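A round-trip sketch for the slimmed-down model. The message dicts are placeholders for real pydantic-ai message dumps, and passing `ctx=None` is safe here only because `parse_and_validate` touches the context exclusively in its error-logging path:

```python
import json

from zrb.task.llm.conversation_history_model import ConversationHistory

history = ConversationHistory(
    history=[{"role": "user", "content": "hello"}],  # placeholder message
    subagent_history={"researcher": []},
)
serialized = history.model_dump_json()

# parse_and_validate accepts a dict, a bare list (the oldest on-disk
# format), or an existing instance; keys it no longer knows, such as the
# removed past_conversation_summary, are simply ignored.
restored = ConversationHistory.parse_and_validate(
    ctx=None, data=json.loads(serialized), source="example"
)
assert restored.subagent_history == {"researcher": []}
```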