zrb 1.21.29__py3-none-any.whl → 2.0.0a4__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in the public registry.
Potentially problematic release.
This version of zrb might be problematic.
- zrb/__init__.py +118 -129
- zrb/builtin/__init__.py +54 -2
- zrb/builtin/llm/chat.py +147 -0
- zrb/callback/callback.py +8 -1
- zrb/cmd/cmd_result.py +2 -1
- zrb/config/config.py +491 -280
- zrb/config/helper.py +84 -0
- zrb/config/web_auth_config.py +50 -35
- zrb/context/any_shared_context.py +13 -2
- zrb/context/context.py +31 -3
- zrb/context/print_fn.py +13 -0
- zrb/context/shared_context.py +14 -1
- zrb/input/option_input.py +30 -2
- zrb/llm/agent/__init__.py +9 -0
- zrb/llm/agent/agent.py +215 -0
- zrb/llm/agent/summarizer.py +20 -0
- zrb/llm/app/__init__.py +10 -0
- zrb/llm/app/completion.py +281 -0
- zrb/llm/app/confirmation/allow_tool.py +66 -0
- zrb/llm/app/confirmation/handler.py +178 -0
- zrb/llm/app/confirmation/replace_confirmation.py +77 -0
- zrb/llm/app/keybinding.py +34 -0
- zrb/llm/app/layout.py +117 -0
- zrb/llm/app/lexer.py +155 -0
- zrb/llm/app/redirection.py +28 -0
- zrb/llm/app/style.py +16 -0
- zrb/llm/app/ui.py +733 -0
- zrb/llm/config/__init__.py +4 -0
- zrb/llm/config/config.py +122 -0
- zrb/llm/config/limiter.py +247 -0
- zrb/llm/history_manager/__init__.py +4 -0
- zrb/llm/history_manager/any_history_manager.py +23 -0
- zrb/llm/history_manager/file_history_manager.py +91 -0
- zrb/llm/history_processor/summarizer.py +108 -0
- zrb/llm/note/__init__.py +3 -0
- zrb/llm/note/manager.py +122 -0
- zrb/llm/prompt/__init__.py +29 -0
- zrb/llm/prompt/claude_compatibility.py +92 -0
- zrb/llm/prompt/compose.py +55 -0
- zrb/llm/prompt/default.py +51 -0
- zrb/llm/prompt/markdown/mandate.md +23 -0
- zrb/llm/prompt/markdown/persona.md +3 -0
- zrb/llm/prompt/markdown/summarizer.md +21 -0
- zrb/llm/prompt/note.py +41 -0
- zrb/llm/prompt/system_context.py +46 -0
- zrb/llm/prompt/zrb.py +41 -0
- zrb/llm/skill/__init__.py +3 -0
- zrb/llm/skill/manager.py +86 -0
- zrb/llm/task/__init__.py +4 -0
- zrb/llm/task/llm_chat_task.py +316 -0
- zrb/llm/task/llm_task.py +245 -0
- zrb/llm/tool/__init__.py +39 -0
- zrb/llm/tool/bash.py +75 -0
- zrb/llm/tool/code.py +266 -0
- zrb/llm/tool/file.py +419 -0
- zrb/llm/tool/note.py +70 -0
- zrb/{builtin/llm → llm}/tool/rag.py +8 -5
- zrb/llm/tool/search/brave.py +53 -0
- zrb/llm/tool/search/searxng.py +47 -0
- zrb/llm/tool/search/serpapi.py +47 -0
- zrb/llm/tool/skill.py +19 -0
- zrb/llm/tool/sub_agent.py +70 -0
- zrb/llm/tool/web.py +97 -0
- zrb/llm/tool/zrb_task.py +66 -0
- zrb/llm/util/attachment.py +101 -0
- zrb/llm/util/prompt.py +104 -0
- zrb/llm/util/stream_response.py +178 -0
- zrb/session/any_session.py +0 -3
- zrb/session/session.py +1 -1
- zrb/task/base/context.py +25 -13
- zrb/task/base/execution.py +52 -47
- zrb/task/base/lifecycle.py +7 -4
- zrb/task/base_task.py +48 -49
- zrb/task/base_trigger.py +4 -1
- zrb/task/cmd_task.py +6 -0
- zrb/task/http_check.py +11 -5
- zrb/task/make_task.py +3 -0
- zrb/task/rsync_task.py +5 -0
- zrb/task/scaffolder.py +7 -4
- zrb/task/scheduler.py +3 -0
- zrb/task/tcp_check.py +6 -4
- zrb/util/ascii_art/art/bee.txt +17 -0
- zrb/util/ascii_art/art/cat.txt +9 -0
- zrb/util/ascii_art/art/ghost.txt +16 -0
- zrb/util/ascii_art/art/panda.txt +17 -0
- zrb/util/ascii_art/art/rose.txt +14 -0
- zrb/util/ascii_art/art/unicorn.txt +15 -0
- zrb/util/ascii_art/banner.py +92 -0
- zrb/util/cli/markdown.py +22 -2
- zrb/util/cmd/command.py +33 -10
- zrb/util/file.py +51 -32
- zrb/util/match.py +78 -0
- zrb/util/run.py +3 -3
- {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/METADATA +9 -15
- {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/RECORD +100 -128
- zrb/attr/__init__.py +0 -0
- zrb/builtin/llm/attachment.py +0 -40
- zrb/builtin/llm/chat_completion.py +0 -274
- zrb/builtin/llm/chat_session.py +0 -270
- zrb/builtin/llm/chat_session_cmd.py +0 -288
- zrb/builtin/llm/chat_trigger.py +0 -79
- zrb/builtin/llm/history.py +0 -71
- zrb/builtin/llm/input.py +0 -27
- zrb/builtin/llm/llm_ask.py +0 -269
- zrb/builtin/llm/previous-session.js +0 -21
- zrb/builtin/llm/tool/__init__.py +0 -0
- zrb/builtin/llm/tool/api.py +0 -75
- zrb/builtin/llm/tool/cli.py +0 -52
- zrb/builtin/llm/tool/code.py +0 -236
- zrb/builtin/llm/tool/file.py +0 -560
- zrb/builtin/llm/tool/note.py +0 -84
- zrb/builtin/llm/tool/sub_agent.py +0 -150
- zrb/builtin/llm/tool/web.py +0 -171
- zrb/builtin/project/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/template/app_template/module/my_module/service/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/common/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/permission/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/__init__.py +0 -0
- zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/__init__.py +0 -0
- zrb/builtin/project/create/__init__.py +0 -0
- zrb/builtin/shell/__init__.py +0 -0
- zrb/builtin/shell/autocomplete/__init__.py +0 -0
- zrb/callback/__init__.py +0 -0
- zrb/cmd/__init__.py +0 -0
- zrb/config/default_prompt/interactive_system_prompt.md +0 -29
- zrb/config/default_prompt/persona.md +0 -1
- zrb/config/default_prompt/summarization_prompt.md +0 -57
- zrb/config/default_prompt/system_prompt.md +0 -38
- zrb/config/llm_config.py +0 -339
- zrb/config/llm_context/config.py +0 -166
- zrb/config/llm_context/config_parser.py +0 -40
- zrb/config/llm_context/workflow.py +0 -81
- zrb/config/llm_rate_limitter.py +0 -190
- zrb/content_transformer/__init__.py +0 -0
- zrb/context/__init__.py +0 -0
- zrb/dot_dict/__init__.py +0 -0
- zrb/env/__init__.py +0 -0
- zrb/group/__init__.py +0 -0
- zrb/input/__init__.py +0 -0
- zrb/runner/__init__.py +0 -0
- zrb/runner/web_route/__init__.py +0 -0
- zrb/runner/web_route/home_page/__init__.py +0 -0
- zrb/session/__init__.py +0 -0
- zrb/session_state_log/__init__.py +0 -0
- zrb/session_state_logger/__init__.py +0 -0
- zrb/task/__init__.py +0 -0
- zrb/task/base/__init__.py +0 -0
- zrb/task/llm/__init__.py +0 -0
- zrb/task/llm/agent.py +0 -204
- zrb/task/llm/agent_runner.py +0 -152
- zrb/task/llm/config.py +0 -122
- zrb/task/llm/conversation_history.py +0 -209
- zrb/task/llm/conversation_history_model.py +0 -67
- zrb/task/llm/default_workflow/coding/workflow.md +0 -41
- zrb/task/llm/default_workflow/copywriting/workflow.md +0 -68
- zrb/task/llm/default_workflow/git/workflow.md +0 -118
- zrb/task/llm/default_workflow/golang/workflow.md +0 -128
- zrb/task/llm/default_workflow/html-css/workflow.md +0 -135
- zrb/task/llm/default_workflow/java/workflow.md +0 -146
- zrb/task/llm/default_workflow/javascript/workflow.md +0 -158
- zrb/task/llm/default_workflow/python/workflow.md +0 -160
- zrb/task/llm/default_workflow/researching/workflow.md +0 -153
- zrb/task/llm/default_workflow/rust/workflow.md +0 -162
- zrb/task/llm/default_workflow/shell/workflow.md +0 -299
- zrb/task/llm/error.py +0 -95
- zrb/task/llm/file_replacement.py +0 -206
- zrb/task/llm/file_tool_model.py +0 -57
- zrb/task/llm/history_processor.py +0 -206
- zrb/task/llm/history_summarization.py +0 -25
- zrb/task/llm/print_node.py +0 -221
- zrb/task/llm/prompt.py +0 -321
- zrb/task/llm/subagent_conversation_history.py +0 -41
- zrb/task/llm/tool_wrapper.py +0 -361
- zrb/task/llm/typing.py +0 -3
- zrb/task/llm/workflow.py +0 -76
- zrb/task/llm_task.py +0 -379
- zrb/task_status/__init__.py +0 -0
- zrb/util/__init__.py +0 -0
- zrb/util/cli/__init__.py +0 -0
- zrb/util/cmd/__init__.py +0 -0
- zrb/util/codemod/__init__.py +0 -0
- zrb/util/string/__init__.py +0 -0
- zrb/xcom/__init__.py +0 -0
- /zrb/{config/default_prompt/file_extractor_system_prompt.md → llm/prompt/markdown/file_extractor.md} +0 -0
- /zrb/{config/default_prompt/repo_extractor_system_prompt.md → llm/prompt/markdown/repo_extractor.md} +0 -0
- /zrb/{config/default_prompt/repo_summarizer_system_prompt.md → llm/prompt/markdown/repo_summarizer.md} +0 -0
- {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/WHEEL +0 -0
- {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,316 @@
from collections.abc import Callable
from typing import TYPE_CHECKING, Any

from zrb.attr.type import BoolAttr, StrAttr, fstring
from zrb.config.config import CFG
from zrb.context.any_context import AnyContext
from zrb.context.print_fn import PrintFn
from zrb.context.shared_context import SharedContext
from zrb.env.any_env import AnyEnv
from zrb.input.any_input import AnyInput
from zrb.input.bool_input import BoolInput
from zrb.input.str_input import StrInput
from zrb.llm.app.confirmation.handler import ConfirmationMiddleware
from zrb.llm.config.config import LLMConfig
from zrb.llm.config.config import llm_config as default_llm_config
from zrb.llm.config.limiter import LLMLimiter
from zrb.llm.history_manager.any_history_manager import AnyHistoryManager
from zrb.llm.history_manager.file_history_manager import FileHistoryManager
from zrb.llm.prompt.compose import PromptManager
from zrb.llm.task.llm_task import LLMTask
from zrb.llm.util.attachment import get_attachments
from zrb.session.session import Session
from zrb.task.any_task import AnyTask
from zrb.task.base_task import BaseTask
from zrb.util.attr import get_attr, get_bool_attr, get_str_attr
from zrb.util.string.name import get_random_name

if TYPE_CHECKING:
    from pydantic_ai import Tool, UserContent
    from pydantic_ai._agent_graph import HistoryProcessor
    from pydantic_ai.models import Model
    from pydantic_ai.settings import ModelSettings
    from pydantic_ai.tools import ToolFuncEither
    from pydantic_ai.toolsets import AbstractToolset
    from rich.theme import Theme


class LLMChatTask(BaseTask):

    def __init__(
        self,
        name: str,
        color: int | None = None,
        icon: str | None = None,
        description: str | None = None,
        cli_only: bool = False,
        input: list[AnyInput | None] | AnyInput | None = None,
        env: list[AnyEnv | None] | AnyEnv | None = None,
        system_prompt: (
            "Callable[[AnyContext], str | fstring | None] | str | None"
        ) = None,
        render_system_prompt: bool = False,
        prompt_manager: PromptManager | None = None,
        tools: list["Tool | ToolFuncEither"] = [],
        toolsets: list["AbstractToolset[None]"] = [],
        message: StrAttr | None = None,
        render_message: bool = True,
        attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None,  # noqa
        history_processors: list["HistoryProcessor"] = [],
        llm_config: LLMConfig | None = None,
        llm_limitter: LLMLimiter | None = None,
        model: (
            "Callable[[AnyContext], Model | str | fstring | None] | Model | None"
        ) = None,
        render_model: bool = True,
        model_settings: (
            "ModelSettings | Callable[[AnyContext], ModelSettings] | None"
        ) = None,
        conversation_name: StrAttr | None = None,
        render_conversation_name: bool = True,
        history_manager: AnyHistoryManager | None = None,
        tool_confirmation: Callable[[Any], Any] | None = None,
        yolo: BoolAttr = False,
        ui_summarize_commands: list[str] = [],
        ui_attach_commands: list[str] = [],
        ui_exit_commands: list[str] = [],
        ui_info_commands: list[str] = [],
        ui_save_commands: list[str] = [],
        ui_load_commands: list[str] = [],
        ui_redirect_output_commands: list[str] = [],
        ui_yolo_toggle_commands: list[str] = [],
        ui_exec_commands: list[str] = [],
        ui_greeting: StrAttr | None = None,
        render_ui_greeting: bool = True,
        ui_assistant_name: StrAttr | None = None,
        render_ui_assistant_name: bool = True,
        ui_jargon: StrAttr | None = None,
        render_ui_jargon: bool = True,
        ui_ascii_art: StrAttr | None = None,
        render_ui_ascii_art_name: bool = True,
        triggers: list[Callable[[], Any]] = [],
        confirmation_middlewares: list[ConfirmationMiddleware] = [],
        markdown_theme: "Theme | None" = None,
        interactive: BoolAttr = True,
        execute_condition: bool | str | Callable[[AnyContext], bool] = True,
        retries: int = 2,
        retry_period: float = 0,
        readiness_check: list[AnyTask] | AnyTask | None = None,
        readiness_check_delay: float = 0.5,
        readiness_check_period: float = 5,
        readiness_failure_threshold: int = 1,
        readiness_timeout: int = 60,
        monitor_readiness: bool = False,
        upstream: list[AnyTask] | AnyTask | None = None,
        fallback: list[AnyTask] | AnyTask | None = None,
        successor: list[AnyTask] | AnyTask | None = None,
        print_fn: PrintFn | None = None,
    ):
        super().__init__(
            name=name,
            color=color,
            icon=icon,
            description=description,
            cli_only=cli_only,
            input=input,
            env=env,
            execute_condition=execute_condition,
            retries=retries,
            retry_period=retry_period,
            readiness_check=readiness_check,
            readiness_check_delay=readiness_check_delay,
            readiness_check_period=readiness_check_period,
            readiness_failure_threshold=readiness_failure_threshold,
            readiness_timeout=readiness_timeout,
            monitor_readiness=monitor_readiness,
            upstream=upstream,
            fallback=fallback,
            successor=successor,
            print_fn=print_fn,
        )
        self._llm_config = default_llm_config if llm_config is None else llm_config
        self._llm_limitter = llm_limitter
        self._system_prompt = system_prompt
        self._render_system_prompt = render_system_prompt
        self._prompt_manager = prompt_manager
        self._tools = tools
        self._toolsets = toolsets
        self._message = message
        self._render_message = render_message
        self._attachment = attachment
        self._history_processors = history_processors
        self._model = model
        self._render_model = render_model
        self._model_settings = model_settings
        self._conversation_name = conversation_name
        self._render_conversation_name = render_conversation_name
        self._history_manager = (
            FileHistoryManager(history_dir=CFG.LLM_HISTORY_DIR)
            if history_manager is None
            else history_manager
        )
        self._tool_confirmation = tool_confirmation
        self._yolo = yolo
        self._ui_summarize_commands = ui_summarize_commands
        self._ui_attach_commands = ui_attach_commands
        self._ui_exit_commands = ui_exit_commands
        self._ui_info_commands = ui_info_commands
        self._ui_save_commands = ui_save_commands
        self._ui_load_commands = ui_load_commands
        self._ui_redirect_output_commands = ui_redirect_output_commands
        self._ui_yolo_toggle_commands = ui_yolo_toggle_commands
        self._ui_exec_commands = ui_exec_commands
        self._ui_greeting = ui_greeting
        self._render_ui_greeting = render_ui_greeting
        self._ui_assistant_name = ui_assistant_name
        self._render_ui_assistant_name = render_ui_assistant_name
        self._ui_jargon = ui_jargon
        self._render_ui_jargon = render_ui_jargon
        self._ui_ascii_art_name = ui_ascii_art
        self._render_ui_ascii_art_name = render_ui_ascii_art_name
        self._triggers = triggers
        self._confirmation_middlewares = confirmation_middlewares
        self._markdown_theme = markdown_theme
        self._interactive = interactive

    @property
    def prompt_manager(self) -> PromptManager:
        if self._prompt_manager is None:
            raise ValueError(f"Task {self.name} doesn't have prompt_manager")
        return self._prompt_manager

    def add_toolset(self, *toolset: "AbstractToolset"):
        self.append_toolset(*toolset)

    def append_toolset(self, *toolset: "AbstractToolset"):
        self._toolsets += list(toolset)

    def add_tool(self, *tool: "Tool | ToolFuncEither"):
        self.append_tool(*tool)

    def append_tool(self, *tool: "Tool | ToolFuncEither"):
        self._tools += list(tool)

    def add_history_processor(self, *processor: "HistoryProcessor"):
        self.append_history_processor(*processor)

    def append_history_processor(self, *processor: "HistoryProcessor"):
        self._history_processors += list(processor)

    def add_confirmation_middleware(self, *middleware: ConfirmationMiddleware):
        self.prepend_confirmation_middleware(*middleware)

    def prepend_confirmation_middleware(self, *middleware: ConfirmationMiddleware):
        self._confirmation_middlewares = (
            list(middleware) + self._confirmation_middlewares
        )

    def add_trigger(self, *trigger: Callable[[], Any]):
        self.append_trigger(*trigger)

    def append_trigger(self, *trigger: Callable[[], Any]):
        self._triggers += trigger

    async def _exec_action(self, ctx: AnyContext) -> Any:
        from zrb.llm.app.lexer import CLIStyleLexer
        from zrb.llm.app.ui import UI

        initial_conversation_name = self._get_conversation_name(ctx)
        initial_yolo = get_bool_attr(ctx, self._yolo, False)
        initial_message = get_attr(ctx, self._message, "", self._render_message)
        initial_attachments = get_attachments(ctx, self._attachment)
        ui_greeting = get_str_attr(ctx, self._ui_greeting, "", self._render_ui_greeting)
        ui_assistant_name = get_str_attr(
            ctx, self._ui_assistant_name, "", self._render_ui_assistant_name
        )
        ui_jargon = get_str_attr(ctx, self._ui_jargon, "", self._render_ui_jargon)
        ascii_art = get_str_attr(
            ctx, self._ui_ascii_art_name, "", self._render_ui_ascii_art_name
        )
        interactive = get_bool_attr(ctx, self._interactive, True)

        llm_task_core = LLMTask(
            name=f"{self.name}-process",
            input=[
                StrInput("message", "Message"),
                StrInput("session", "Conversation Session"),
                BoolInput("yolo", "YOLO Mode"),
                StrInput("attachments", "Attachments"),
            ],
            env=self.envs,
            system_prompt=self._system_prompt,
            render_system_prompt=self._render_system_prompt,
            prompt_manager=self._prompt_manager,
            tools=self._tools,
            toolsets=self._toolsets,
            history_processors=self._history_processors,
            llm_config=self._llm_config,
            llm_limitter=self._llm_limitter,
            model=self._model,
            render_model=self._render_model,
            model_settings=self._model_settings,
            history_manager=self._history_manager,
            message="{ctx.input.message}",
            conversation_name="{ctx.input.session}",
            yolo="{ctx.input.yolo}",
            attachment=lambda ctx: ctx.input.attachments,
            summarize_command=self._ui_summarize_commands,
        )

        if not interactive:
            session_input = {
                "message": initial_message,
                "session": initial_conversation_name,
                "yolo": initial_yolo,
                "attachments": initial_attachments,
            }
            shared_ctx = SharedContext(
                input=session_input,
                print_fn=ctx.shared_print,  # Use current task's print function
            )
            session = Session(shared_ctx)
            return await llm_task_core.async_run(session)

        ui = UI(
            greeting=ui_greeting,
            assistant_name=ui_assistant_name,
            ascii_art=ascii_art,
            jargon=ui_jargon,
            output_lexer=CLIStyleLexer(),
            llm_task=llm_task_core,
            history_manager=self._history_manager,
            initial_message=initial_message,
            initial_attachments=initial_attachments,
            conversation_session_name=initial_conversation_name,
            yolo=initial_yolo,
            triggers=self._triggers,
            confirmation_middlewares=self._confirmation_middlewares,
            markdown_theme=self._markdown_theme,
            summarize_commands=self._ui_summarize_commands,
            attach_commands=self._ui_attach_commands,
            exit_commands=self._ui_exit_commands,
            info_commands=self._ui_info_commands,
            save_commands=self._ui_save_commands,
            load_commands=self._ui_load_commands,
            yolo_toggle_commands=self._ui_yolo_toggle_commands,
            redirect_output_commands=self._ui_redirect_output_commands,
            exec_commands=self._ui_exec_commands,
            model=self._get_model(ctx),
        )
        await ui.run_async()
        return ui.last_output

    def _get_conversation_name(self, ctx: AnyContext) -> str:
        conversation_name = str(
            get_attr(ctx, self._conversation_name, "", self._render_conversation_name)
        )
        if conversation_name.strip() == "":
            conversation_name = get_random_name()
        return conversation_name

    def _get_model(self, ctx: AnyContext) -> "str | Model":
        model = self._model
        rendered_model = get_attr(ctx, model, None, auto_render=self._render_model)
        if rendered_model is not None:
            return rendered_model
        return self._llm_config.model
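
For orientation, here is a minimal usage sketch built only from the constructor signature above; the task name and the concrete UI values are illustrative assumptions, not values taken from the package.

    # Hypothetical wiring of the new LLMChatTask; parameter names come from the
    # signature above, the concrete values are made up for illustration.
    from zrb.llm.task.llm_chat_task import LLMChatTask

    chat = LLMChatTask(
        name="chat",
        description="Interactive LLM chat session",
        ui_greeting="How can I help you today?",
        ui_assistant_name="Zrb",
        ui_exit_commands=["/bye", "/quit"],
        ui_summarize_commands=["/summarize"],
        yolo=False,        # tool calls still go through confirmation handling
        interactive=True,  # set False to send a single message and return
    )
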
zrb/llm/task/llm_task.py
ADDED
@@ -0,0 +1,245 @@
from collections.abc import Callable
from typing import TYPE_CHECKING, Any

from zrb.attr.type import BoolAttr, StrAttr, fstring
from zrb.config.config import CFG
from zrb.context.any_context import AnyContext
from zrb.context.print_fn import PrintFn
from zrb.env.any_env import AnyEnv
from zrb.input.any_input import AnyInput
from zrb.llm.agent.agent import create_agent, run_agent
from zrb.llm.config.config import LLMConfig
from zrb.llm.config.config import llm_config as default_llm_config
from zrb.llm.config.limiter import LLMLimiter
from zrb.llm.config.limiter import llm_limiter as default_llm_limitter
from zrb.llm.history_manager.any_history_manager import AnyHistoryManager
from zrb.llm.history_manager.file_history_manager import FileHistoryManager
from zrb.llm.history_processor.summarizer import (
    summarize_history,
)
from zrb.llm.prompt.compose import PromptManager
from zrb.llm.util.attachment import get_attachments
from zrb.llm.util.stream_response import (
    create_event_handler,
    create_faint_printer,
)
from zrb.task.any_task import AnyTask
from zrb.task.base_task import BaseTask
from zrb.util.attr import get_attr, get_bool_attr
from zrb.util.string.name import get_random_name

if TYPE_CHECKING:
    from pydantic_ai import Tool, UserContent
    from pydantic_ai._agent_graph import HistoryProcessor
    from pydantic_ai.models import Model
    from pydantic_ai.settings import ModelSettings
    from pydantic_ai.tools import ToolFuncEither
    from pydantic_ai.toolsets import AbstractToolset


class LLMTask(BaseTask):

    def __init__(
        self,
        name: str,
        color: int | None = None,
        icon: str | None = None,
        description: str | None = None,
        cli_only: bool = False,
        input: list[AnyInput | None] | AnyInput | None = None,
        env: list[AnyEnv | None] | AnyEnv | None = None,
        system_prompt: (
            "Callable[[AnyContext], str | fstring | None] | str | None"
        ) = None,
        render_system_prompt: bool = False,
        prompt_manager: PromptManager | None = None,
        tools: list["Tool | ToolFuncEither"] = [],
        toolsets: list["AbstractToolset[None]"] = [],
        message: StrAttr | None = None,
        render_message: bool = True,
        attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None,  # noqa
        history_processors: list["HistoryProcessor"] = [],
        llm_config: LLMConfig | None = None,
        llm_limitter: LLMLimiter | None = None,
        model: (
            "Callable[[AnyContext], Model | str | fstring | None] | Model | None"
        ) = None,
        render_model: bool = True,
        model_settings: (
            "ModelSettings | Callable[[AnyContext], ModelSettings] | None"
        ) = None,
        conversation_name: StrAttr | None = None,
        render_conversation_name: bool = True,
        history_manager: AnyHistoryManager | None = None,
        tool_confirmation: Callable[[Any], Any] | None = None,
        yolo: BoolAttr = False,
        summarize_command: list[str] = [],
        execute_condition: bool | str | Callable[[AnyContext], bool] = True,
        retries: int = 2,
        retry_period: float = 0,
        readiness_check: list[AnyTask] | AnyTask | None = None,
        readiness_check_delay: float = 0.5,
        readiness_check_period: float = 5,
        readiness_failure_threshold: int = 1,
        readiness_timeout: int = 60,
        monitor_readiness: bool = False,
        upstream: list[AnyTask] | AnyTask | None = None,
        fallback: list[AnyTask] | AnyTask | None = None,
        successor: list[AnyTask] | AnyTask | None = None,
        print_fn: PrintFn | None = None,
    ):
        super().__init__(
            name=name,
            color=color,
            icon=icon,
            description=description,
            cli_only=cli_only,
            input=input,
            env=env,
            execute_condition=execute_condition,
            retries=retries,
            retry_period=retry_period,
            readiness_check=readiness_check,
            readiness_check_delay=readiness_check_delay,
            readiness_check_period=readiness_check_period,
            readiness_failure_threshold=readiness_failure_threshold,
            readiness_timeout=readiness_timeout,
            monitor_readiness=monitor_readiness,
            upstream=upstream,
            fallback=fallback,
            successor=successor,
            print_fn=print_fn,
        )
        self._llm_config = default_llm_config if llm_config is None else llm_config
        self._llm_limitter = (
            default_llm_limitter if llm_limitter is None else llm_limitter
        )
        self._system_prompt = system_prompt
        self._render_system_prompt = render_system_prompt
        self._prompt_manager = prompt_manager
        self._tools = tools
        self._toolsets = toolsets
        self._message = message
        self._render_message = render_message
        self._attachment = attachment
        self._history_processors = history_processors
        self._model = model
        self._render_model = render_model
        self._model_settings = model_settings
        self._conversation_name = conversation_name
        self._render_conversation_name = render_conversation_name
        self._history_manager = (
            FileHistoryManager(history_dir=CFG.LLM_HISTORY_DIR)
            if history_manager is None
            else history_manager
        )
        self._tool_confirmation = tool_confirmation
        self._yolo = yolo
        self._summarize_command = summarize_command

    @property
    def prompt_manager(self) -> PromptManager:
        if self._prompt_manager is None:
            raise ValueError(f"Task {self.name} doesn't have prompt_manager")
        return self._prompt_manager

    def add_toolset(self, *toolset: "AbstractToolset"):
        self.append_toolset(*toolset)

    def append_toolset(self, *toolset: "AbstractToolset"):
        self._toolsets += list(toolset)

    def add_tool(self, *tool: "Tool | ToolFuncEither"):
        self.append_tool(*tool)

    def append_tool(self, *tool: "Tool | ToolFuncEither"):
        self._tools += list(tool)

    def add_history_processor(self, *processor: "HistoryProcessor"):
        self.append_history_processor(*processor)

    def append_history_processor(self, *processor: "HistoryProcessor"):
        self._history_processors += list(processor)

    async def _exec_action(self, ctx: AnyContext) -> Any:
        conversation_name = self._get_conversation_name(ctx)
        message_history = self._history_manager.load(conversation_name)
        user_message = get_attr(ctx, self._message, "", self._render_message)
        user_attachments = get_attachments(ctx, self._attachment)

        if (
            isinstance(user_message, str)
            and user_message.strip() in self._summarize_command
        ):
            ctx.print("Compressing conversation history...", plain=True)
            new_history = await summarize_history(message_history)
            self._history_manager.update(conversation_name, new_history)
            self._history_manager.save(conversation_name)
            return "Conversation history compressed."

        yolo = get_bool_attr(ctx, self._yolo, False)
        system_prompt = self._get_system_prompt(ctx)
        ctx.log_debug(f"SYSTEM PROMPT: {system_prompt}")
        agent = create_agent(
            model=self._get_model(ctx),
            system_prompt=self._get_system_prompt(ctx),
            tools=self._tools,
            toolsets=self._toolsets,
            model_settings=self._get_model_settings(ctx),
            history_processors=self._history_processors,
            yolo=yolo,
        )

        print_event = create_faint_printer(ctx)
        handle_event = create_event_handler(
            print_event,
            show_tool_call_detail=CFG.LLM_SHOW_TOOL_CALL_PREPARATION,
            show_tool_result=CFG.LLM_SHOW_TOOL_CALL_RESULT,
        )

        output, new_history = await run_agent(
            agent=agent,
            message=user_message,
            message_history=message_history,
            limiter=self._llm_limitter,
            attachments=user_attachments,
            print_fn=ctx.print,
            event_handler=handle_event,
            tool_confirmation=self._tool_confirmation,
        )

        self._history_manager.update(conversation_name, new_history)
        self._history_manager.save(conversation_name)
        ctx.log_debug(f"All messages: {new_history}")

        return output

    def _get_system_prompt(self, ctx: AnyContext) -> str:
        if self._prompt_manager is None:
            return str(
                get_attr(ctx, self._system_prompt, "", self._render_system_prompt)
            )
        compose_prompt = self._prompt_manager.compose_prompt()
        return compose_prompt(ctx)

    def _get_conversation_name(self, ctx: AnyContext) -> str:
        conversation_name = str(
            get_attr(ctx, self._conversation_name, "", self._render_conversation_name)
        )
        if conversation_name.strip() == "":
            conversation_name = get_random_name()
        return conversation_name

    def _get_model_settings(self, ctx: AnyContext) -> "ModelSettings | None":
        model_settings = self._model_settings
        rendered_model_settings = get_attr(ctx, model_settings, None)
        if rendered_model_settings is not None:
            return rendered_model_settings
        return self._llm_config.model_settings

    def _get_model(self, ctx: AnyContext) -> "str | Model":
        model = self._model
        rendered_model = get_attr(ctx, model, None, auto_render=self._render_model)
        if rendered_model is not None:
            return rendered_model
        return self._llm_config.model
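
LLMTask is the non-interactive core that LLMChatTask delegates to: it renders the message, conversation name, and yolo flag from its inputs, runs the agent, and persists the updated history. Below is a sketch of driving it directly, modeled on the non-interactive branch of LLMChatTask above; the concrete input values are illustrative assumptions.

    # Hypothetical direct invocation of LLMTask, mirroring how LLMChatTask builds
    # and runs its inner task in non-interactive mode; input values are made up.
    import asyncio

    from zrb.context.shared_context import SharedContext
    from zrb.input.bool_input import BoolInput
    from zrb.input.str_input import StrInput
    from zrb.llm.task.llm_task import LLMTask
    from zrb.session.session import Session

    ask = LLMTask(
        name="ask",
        input=[
            StrInput("message", "Message"),
            StrInput("session", "Conversation Session"),
            BoolInput("yolo", "YOLO Mode"),
        ],
        message="{ctx.input.message}",
        conversation_name="{ctx.input.session}",
        yolo="{ctx.input.yolo}",
    )

    shared_ctx = SharedContext(
        input={"message": "Summarize README.md", "session": "demo", "yolo": False}
    )
    asyncio.run(ask.async_run(Session(shared_ctx)))
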
zrb/llm/tool/__init__.py
ADDED
@@ -0,0 +1,39 @@
from zrb.llm.tool.bash import run_shell_command
from zrb.llm.tool.code import analyze_code
from zrb.llm.tool.file import (
    analyze_file,
    list_files,
    read_file,
    read_files,
    replace_in_file,
    search_files,
    write_file,
    write_files,
)
from zrb.llm.tool.note import create_note_tools
from zrb.llm.tool.rag import create_rag_from_directory
from zrb.llm.tool.skill import create_activate_skill_tool
from zrb.llm.tool.sub_agent import create_sub_agent_tool
from zrb.llm.tool.web import open_web_page, search_internet
from zrb.llm.tool.zrb_task import create_list_zrb_task_tool, create_run_zrb_task_tool

__all__ = [
    "run_shell_command",
    "analyze_code",
    "list_files",
    "read_file",
    "read_files",
    "write_file",
    "write_files",
    "replace_in_file",
    "search_files",
    "analyze_file",
    "create_note_tools",
    "create_rag_from_directory",
    "create_activate_skill_tool",
    "create_sub_agent_tool",
    "open_web_page",
    "search_internet",
    "create_list_zrb_task_tool",
    "create_run_zrb_task_tool",
]
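
Because these tools are re-exported from zrb.llm.tool, they can be attached to an LLMTask (or LLMChatTask) through the add_tool helper defined earlier in this diff. A small sketch under that assumption; the task itself is illustrative.

    # Hypothetical example: registering bundled tools on an LLM task via add_tool.
    from zrb.llm.task.llm_task import LLMTask
    from zrb.llm.tool import read_file, run_shell_command, search_internet, write_file

    coder = LLMTask(name="coder", message="{ctx.input.message}")
    coder.add_tool(read_file, write_file)  # add_tool accepts several tools per call
    coder.add_tool(run_shell_command, search_internet)
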
zrb/llm/tool/bash.py
ADDED
@@ -0,0 +1,75 @@
import asyncio
import re

from zrb.util.cli.style import stylize_faint


async def run_shell_command(command: str, timeout: int = 30) -> str:
    """
    Executes a shell command on the host system and returns its combined stdout and stderr.
    This is a powerful tool for running builds, tests, or system utilities.

    **CRITICAL SAFETY:**
    - DO NOT run destructive commands (e.g., `rm -rf /`) without absolute certainty.
    - Prefer specialized tools (like `read_file` or `write_file`) for file operations.

    **USAGE GUIDELINES:**
    - Use non-interactive commands.
    - If a command is expected to produce massive output, use `timeout` or pipe to a file.
    - The output is streamed to the console in real-time.

    Args:
        command (str): The full shell command to execute.
        timeout (int): Maximum wait time in seconds before terminating the process. Defaults to 30.
    """
    ANSI_ESCAPE = re.compile(
        r"(?:\x1B\[[0-?]*[ -/]*[@-~])|"  # CSI (Control Sequence Introducer)
        r"(?:\x1B\][^\a\x1b]*[\a\x1b])|"  # OSC (Operating System Command)
        r"(?:\x1B[0-9=>])"  # Simple 2-byte (DECSC, DECRC, etc.)
    )
    try:
        process = await asyncio.create_subprocess_shell(
            command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )

        stdout_lines = []
        stderr_lines = []

        async def read_stream(stream, lines_list, prefix=""):
            while True:
                line = await stream.readline()
                if not line:
                    break
                decoded = line.decode()
                if decoded:
                    shown = ANSI_ESCAPE.sub("", decoded)
                    shown = stylize_faint(shown)
                    print(f"{prefix} {shown}", end="")  # Stream to console
                lines_list.append(decoded)

        # Wait for the process to complete or timeout
        try:
            await asyncio.wait_for(
                asyncio.gather(
                    read_stream(process.stdout, stdout_lines, ""),
                    read_stream(process.stderr, stderr_lines, "[stderr] "),
                    process.wait(),
                ),
                timeout=timeout,
            )
        except asyncio.TimeoutError:
            if process.returncode is None:
                try:
                    process.terminate()
                    await process.wait()
                except ProcessLookupError:
                    pass
            return f"Error: Command timed out after {timeout} seconds."

        output = "\n".join(stdout_lines)
        error = "\n".join(stderr_lines)

        return f"Exit Code: {process.returncode}\nStdout:\n{output}\nStderr:\n{error}"

    except Exception as e:
        return f"Error executing command: {e}"
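
Since run_shell_command is a plain async function, it can also be exercised on its own outside the agent loop; the return value is the single formatted string built in the return statement above. The command below is only an example.

    # Calling the tool directly.
    import asyncio

    from zrb.llm.tool.bash import run_shell_command

    async def main() -> None:
        result = await run_shell_command("echo hello", timeout=5)
        print(result)  # "Exit Code: 0\nStdout:\n..." on success

    asyncio.run(main())
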