zrb 1.21.37__py3-none-any.whl → 1.21.43__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of zrb might be problematic.
- zrb/builtin/llm/chat_completion.py +46 -0
- zrb/builtin/llm/chat_session.py +89 -29
- zrb/builtin/llm/chat_session_cmd.py +87 -11
- zrb/builtin/llm/chat_trigger.py +92 -5
- zrb/builtin/llm/history.py +14 -7
- zrb/builtin/llm/llm_ask.py +16 -7
- zrb/builtin/llm/tool/file.py +3 -2
- zrb/builtin/llm/tool/search/brave.py +2 -2
- zrb/builtin/llm/tool/search/searxng.py +2 -2
- zrb/builtin/llm/tool/search/serpapi.py +2 -2
- zrb/builtin/llm/xcom_names.py +3 -0
- zrb/callback/callback.py +8 -1
- zrb/config/config.py +1 -1
- zrb/context/context.py +11 -0
- zrb/task/base/context.py +25 -13
- zrb/task/base/execution.py +52 -47
- zrb/task/base/lifecycle.py +1 -1
- zrb/task/base_task.py +31 -45
- zrb/task/base_trigger.py +0 -1
- zrb/task/llm/agent.py +39 -31
- zrb/task/llm/agent_runner.py +59 -1
- zrb/task/llm/default_workflow/researching/workflow.md +2 -0
- zrb/task/llm/print_node.py +15 -2
- zrb/task/llm/prompt.py +70 -40
- zrb/task/llm/workflow.py +13 -1
- zrb/task/llm_task.py +83 -28
- zrb/util/run.py +3 -3
- {zrb-1.21.37.dist-info → zrb-1.21.43.dist-info}/METADATA +1 -1
- {zrb-1.21.37.dist-info → zrb-1.21.43.dist-info}/RECORD +31 -30
- {zrb-1.21.37.dist-info → zrb-1.21.43.dist-info}/WHEEL +0 -0
- {zrb-1.21.37.dist-info → zrb-1.21.43.dist-info}/entry_points.txt +0 -0
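The headline change in this range is session management for the chat REPL: new `/session`, `/save`, `/load`, and `/response save` commands, save-point files under `LLM_HISTORY_DIR/save-point`, and a status toolbar in `chat_trigger.py`. As a minimal sketch of the save-point mechanism the diffs below introduce (the directory layout comes from the diff; `LLM_HISTORY_DIR` here is a hypothetical stand-in for zrb's `CFG.LLM_HISTORY_DIR`, and these helpers are illustrative, not part of the package):

import os

LLM_HISTORY_DIR = os.path.expanduser("~/.zrb/llm-history")  # hypothetical stand-in

def save_session(save_point: str, session_name: str) -> str:
    # A save-point is just a file whose content is the session name it records.
    path = os.path.join(LLM_HISTORY_DIR, "save-point", save_point)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "w") as f:
        f.write(session_name)
    return path

def load_session(save_point: str) -> str | None:
    # Loading resolves the save-point back to the recorded session name.
    path = os.path.join(LLM_HISTORY_DIR, "save-point", save_point)
    if not os.path.exists(path):
        return None
    with open(path) as f:
        return f.read().strip()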
zrb/builtin/llm/chat_completion.py
CHANGED

@@ -11,16 +11,24 @@ from zrb.builtin.llm.chat_session_cmd import (
     CLEAR_SUB_CMD,
     HELP_CMD,
     HELP_CMD_DESC,
+    LOAD_CMD,
+    LOAD_CMD_DESC,
     MULTILINE_END_CMD,
     MULTILINE_END_CMD_DESC,
     MULTILINE_START_CMD,
     MULTILINE_START_CMD_DESC,
     QUIT_CMD,
     QUIT_CMD_DESC,
+    RESPONSE_CMD,
+    RESPONSE_CMD_DESC,
+    RESPONSE_SAVE_SUB_CMD_DESC,
     RUN_CLI_CMD,
     RUN_CLI_CMD_DESC,
     SAVE_CMD,
     SAVE_CMD_DESC,
+    SAVE_SUB_CMD,
+    SESSION_CMD,
+    SESSION_CMD_DESC,
     SET_SUB_CMD,
     WORKFLOW_ADD_SUB_CMD_DESC,
     WORKFLOW_CLEAR_SUB_CMD_DESC,
@@ -33,6 +41,7 @@ from zrb.builtin.llm.chat_session_cmd import (
     YOLO_SET_FALSE_CMD_DESC,
     YOLO_SET_TRUE_CMD_DESC,
 )
+from zrb.config.config import CFG
 from zrb.util.match import fuzzy_match
 
 if TYPE_CHECKING:
@@ -52,10 +61,39 @@ def get_chat_completer() -> "Completer":
                 yield completion
             for completion in self._complete_slash_file_command(document):
                 yield completion
+            for completion in self._complete_slash_session_load_command(document):
+                yield completion
             # Appendix
             for completion in self._complete_appendix(document):
                 yield completion
 
+        def _complete_slash_session_load_command(self, document: Document):
+            text = document.text_before_cursor
+            prefixes = []
+            for cmd in LOAD_CMD:
+                prefixes.append(f"{cmd} ")
+
+            for prefix in prefixes:
+                if text.startswith(prefix):
+                    pattern = text[len(prefix) :]
+                    # Use fuzzy_path_search but in LLM_HISTORY_DIR/save-point
+                    save_point_dir = os.path.join(CFG.LLM_HISTORY_DIR, "save-point")
+                    if not os.path.isdir(save_point_dir):
+                        return
+
+                    potential_options = self._fuzzy_path_search(
+                        pattern, root=save_point_dir, dirs=False, files=True
+                    )
+                    for option in potential_options:
+                        if option.startswith(save_point_dir):
+                            rel_option = os.path.relpath(option, save_point_dir)
+                        else:
+                            rel_option = option
+                        yield Completion(
+                            f"{prefix}{rel_option}",
+                            start_position=-len(text),
+                        )
+
         def _complete_slash_file_command(self, document: Document):
             text = document.text_before_cursor
             prefixes = []
@@ -118,8 +156,16 @@ def get_chat_completer() -> "Completer":
             cmd_options[f"{cmd} {subcmd}"] = WORKFLOW_CLEAR_SUB_CMD_DESC
         for subcmd in SET_SUB_CMD:
             cmd_options[f"{cmd} {subcmd}"] = WORKFLOW_SET_SUB_CMD_DESC
+    for cmd in SESSION_CMD:
+        cmd_options[cmd] = SESSION_CMD_DESC
     for cmd in SAVE_CMD:
         cmd_options[cmd] = SAVE_CMD_DESC
+    for cmd in LOAD_CMD:
+        cmd_options[cmd] = LOAD_CMD_DESC
+    for cmd in RESPONSE_CMD:
+        cmd_options[cmd] = RESPONSE_CMD_DESC
+        for subcmd in SAVE_SUB_CMD:
+            cmd_options[f"{cmd} {subcmd}"] = RESPONSE_SAVE_SUB_CMD_DESC
     for cmd in ATTACHMENT_CMD:
         cmd_options[cmd] = ATTACHMENT_CMD_DESC
         for subcmd in ADD_SUB_CMD:
zrb/builtin/llm/chat_session.py
CHANGED
@@ -5,27 +5,39 @@ from typing import TYPE_CHECKING, Any
 from zrb.builtin.llm.chat_session_cmd import (
     ATTACHMENT_CMD,
     HELP_CMD,
+    LOAD_CMD,
     MULTILINE_END_CMD,
     MULTILINE_START_CMD,
     QUIT_CMD,
+    RESPONSE_CMD,
     RUN_CLI_CMD,
     SAVE_CMD,
+    SESSION_CMD,
     WORKFLOW_CMD,
     YOLO_CMD,
     get_new_attachments,
     get_new_workflows,
     get_new_yolo_mode,
+    handle_response_cmd,
+    handle_session,
     is_command_match,
     print_commands,
     print_current_attachments,
     print_current_workflows,
     print_current_yolo_mode,
     run_cli_command,
-    save_final_result,
 )
 from zrb.builtin.llm.chat_trigger import llm_chat_trigger
+from zrb.builtin.llm.history import get_last_session_name
+from zrb.builtin.llm.xcom_names import (
+    LLM_ASK_ERROR_XCOM_NAME,
+    LLM_ASK_RESULT_XCOM_NAME,
+    LLM_ASK_SESSION_XCOM_NAME,
+)
 from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
+from zrb.context.any_shared_context import AnySharedContext
+from zrb.task.llm.workflow import get_llm_loaded_workflow_xcom
 from zrb.util.cli.markdown import render_markdown
 
 if TYPE_CHECKING:
@@ -44,30 +56,39 @@ async def read_user_prompt(ctx: AnyContext) -> str:
     reader: PromptSession[Any] | StreamReader = await _setup_input_reader(is_tty)
     multiline_mode = False
     is_first_time = True
-    current_workflows: str = ctx.input.
+    current_workflows: str = ctx.input.workflow
     current_yolo_mode: bool | str = ctx.input.yolo
     current_attachments: str = ctx.input.attach
     user_inputs: list[str] = []
     final_result: str = ""
     should_end = False
+    start_new: bool = ctx.input.start_new
+    if not start_new and ctx.input.previous_session == "":
+        session = ctx.session
+        if session is not None:
+            # Automatically inject last session name as previous session
+            last_session_name = get_last_session_name()
+            session.shared_ctx.input["previous_session"] = last_session_name
+            session.shared_ctx.input["previous-session"] = last_session_name
+    current_session_name: str | None = ctx.input.previous_session
     while not should_end:
-        await asyncio.sleep(0)
-        previous_session_name: str | None = (
-            ctx.input.previous_session if is_first_time else ""
-        )
-        start_new: bool = ctx.input.start_new if is_first_time else False
+        await asyncio.sleep(0)
         if is_first_time and ctx.input.message.strip() != "":
             user_input = ctx.input.message
         else:
             # Get user input based on mode
             if not multiline_mode:
                 ctx.print("💬 >>", plain=True)
-            user_input = await llm_chat_trigger.wait(
+            user_input = await llm_chat_trigger.wait(
+                ctx, reader, current_session_name, is_first_time
+            )
             if not multiline_mode:
                 ctx.print("", plain=True)
         # At this point, is_first_time has to be False
         if is_first_time:
             is_first_time = False
+            # Add additional workflows activated by LLM in the previous session
+            current_workflows = _get_new_workflows_from_xcom(ctx, current_workflows)
         # Handle user input (including slash commands)
         if multiline_mode:
             if is_command_match(user_input, MULTILINE_END_CMD):
@@ -87,8 +108,8 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             current_workflows = get_new_workflows(current_workflows, user_input)
             print_current_workflows(ctx, current_workflows)
             continue
-        elif is_command_match(user_input,
-
+        elif is_command_match(user_input, RESPONSE_CMD):
+            handle_response_cmd(ctx, user_input, final_result)
             continue
         elif is_command_match(user_input, ATTACHMENT_CMD):
             current_attachments = get_new_attachments(
@@ -106,20 +127,30 @@ async def read_user_prompt(ctx: AnyContext) -> str:
         elif is_command_match(user_input, HELP_CMD):
             print_commands(ctx)
             continue
+        elif (
+            is_command_match(user_input, SESSION_CMD)
+            or is_command_match(user_input, SAVE_CMD)
+            or is_command_match(user_input, LOAD_CMD)
+        ):
+            current_session_name, start_new = handle_session(
+                ctx, current_session_name, start_new, user_input
+            )
         else:
             user_inputs.append(user_input)
         # Trigger LLM
         user_prompt = "\n".join(user_inputs)
         user_inputs = []
-        result = await _trigger_ask_and_wait_for_result(
+        result, current_session_name = await _trigger_ask_and_wait_for_result(
             ctx=ctx,
             user_prompt=user_prompt,
             attach=current_attachments,
             workflows=current_workflows,
             yolo_mode=current_yolo_mode,
-            previous_session_name=
+            previous_session_name=current_session_name,
             start_new=start_new,
         )
+        # After the first trigger, we no longer need to force start_new
+        start_new = False
         current_attachments = ""
         final_result = final_result if result is None else result
         if ctx.is_web_mode or not is_tty:
@@ -127,6 +158,23 @@ async def read_user_prompt(ctx: AnyContext) -> str:
             return final_result
 
 
+def _get_new_workflows_from_xcom(ctx: AnyContext, current_workflows: str):
+    llm_loaded_workflow_xcom = get_llm_loaded_workflow_xcom(ctx)
+    new_workflow_names = [
+        workflow_name.strip()
+        for workflow_name in current_workflows.split(",")
+        if workflow_name.strip() != ""
+    ]
+    while len(llm_loaded_workflow_xcom) > 0:
+        additional_workflow_names = [
+            workflow_name
+            for workflow_name in llm_loaded_workflow_xcom.pop()
+            if workflow_name not in new_workflow_names
+        ]
+        new_workflow_names += additional_workflow_names
+    return ",".join(new_workflow_names)
+
+
 async def _setup_input_reader(
     is_interactive: bool,
 ) -> "PromptSession[Any] | StreamReader":
@@ -151,7 +199,7 @@ async def _trigger_ask_and_wait_for_result(
     yolo_mode: bool | str,
     previous_session_name: str | None = None,
     start_new: bool = False,
-) -> str | None:
+) -> tuple[str | None, str | None]:
     """
     Triggers the LLM ask task and waits for the result via XCom.
 
@@ -162,22 +210,27 @@ async def _trigger_ask_and_wait_for_result(
         start_new: Whether to start a new conversation (optional).
 
     Returns:
-        The result from the LLM task
+        The result from the LLM task and the session name.
     """
     if user_prompt.strip() == "":
-        return None
+        return None, previous_session_name
     await _trigger_ask(
         ctx, user_prompt, attach, workflows, yolo_mode, previous_session_name, start_new
     )
     result = await _wait_ask_result(ctx)
+
+    resolved_session_name = previous_session_name
+    if result is not None:
+        resolved_session_name = await _wait_ask_session_name(ctx)
+
     md_result = render_markdown(result) if result is not None else ""
     ctx.print("\n🤖 >>", plain=True)
     ctx.print(md_result, plain=True)
     ctx.print("", plain=True)
-    return result
+    return result, resolved_session_name
 
 
-def get_llm_ask_input_mapping(callback_ctx: AnyContext):
+def get_llm_ask_input_mapping(callback_ctx: AnyContext | AnySharedContext):
     """
     Generates the input mapping for the LLM ask task from the callback context.
 
@@ -200,7 +253,7 @@ def get_llm_ask_input_mapping(callback_ctx: AnyContext):
         "previous-session": data.get("previous_session_name"),
         "message": data.get("message"),
         "attach": data.get("attach"),
-        "
+        "workflow": data.get("workflow"),
         "yolo": data.get("yolo"),
     }
 
@@ -223,15 +276,13 @@ async def _trigger_ask(
         previous_session_name: The name of the previous chat session (optional).
         start_new: Whether to start a new conversation (optional).
     """
-    if previous_session_name is None:
-        previous_session_name = await _wait_ask_session_name(ctx)
     ctx.xcom["ask_trigger"].push(
         {
             "previous_session_name": previous_session_name,
             "start_new": start_new,
             "message": user_prompt,
             "attach": attach,
-            "
+            "workflow": workflows,
             "yolo": yolo_mode,
         }
     )
@@ -247,12 +298,18 @@ async def _wait_ask_result(ctx: AnyContext) -> str | None:
     Returns:
         The result string from the LLM task.
     """
-    while
-
-
-
+    while (
+        LLM_ASK_RESULT_XCOM_NAME not in ctx.xcom
+        or len(ctx.xcom[LLM_ASK_RESULT_XCOM_NAME]) == 0
+    ):
+        await asyncio.sleep(0)
+    if (
+        LLM_ASK_ERROR_XCOM_NAME in ctx.xcom
+        and len(ctx.xcom[LLM_ASK_ERROR_XCOM_NAME]) > 0
+    ):
+        ctx.xcom[LLM_ASK_ERROR_XCOM_NAME].pop()
         return None
-    return ctx.xcom.
+    return ctx.xcom[LLM_ASK_RESULT_XCOM_NAME].pop()
 
 
 async def _wait_ask_session_name(ctx: AnyContext) -> str:
@@ -265,6 +322,9 @@ async def _wait_ask_session_name(ctx: AnyContext) -> str:
     Returns:
         The session name string.
     """
-    while
-
-
+    while (
+        LLM_ASK_SESSION_XCOM_NAME not in ctx.xcom
+        or len(ctx.xcom[LLM_ASK_SESSION_XCOM_NAME]) == 0
+    ):
+        await asyncio.sleep(0)
+    return ctx.xcom[LLM_ASK_SESSION_XCOM_NAME].pop()
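The two polling helpers above are the REPL's half of an XCom handshake: the UI coroutine pushes a request onto the `ask_trigger` queue, then cooperatively yields with `await asyncio.sleep(0)` until the LLM task publishes a result, error, or session name under the keys from `zrb/builtin/llm/xcom_names.py`. A minimal sketch of the same pattern with a plain dict-of-lists queue (the key name and structure here are illustrative, not zrb's API):

import asyncio

xcom: dict[str, list[str]] = {}  # illustrative stand-in for ctx.xcom

async def producer() -> None:
    await asyncio.sleep(0.1)  # simulate LLM work
    xcom.setdefault("llm_ask_result", []).append("42")

async def wait_result() -> str:
    # Busy-wait, yielding control so the producer coroutine can run.
    while "llm_ask_result" not in xcom or len(xcom["llm_ask_result"]) == 0:
        await asyncio.sleep(0)
    return xcom["llm_ask_result"].pop()

async def main() -> None:
    result, _ = await asyncio.gather(wait_result(), producer())
    print(result)  # -> 42

asyncio.run(main())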
zrb/builtin/llm/chat_session_cmd.py
CHANGED

@@ -12,7 +12,7 @@ from zrb.util.cli.style import (
     stylize_faint,
 )
 from zrb.util.cmd.command import run_command
-from zrb.util.file import write_file
+from zrb.util.file import read_file, write_file
 from zrb.util.markdown import make_markdown_section
 from zrb.util.string.conversion import FALSE_STRS, TRUE_STRS, to_boolean
 
@@ -20,14 +20,20 @@ MULTILINE_START_CMD = ["/multi", "/multiline"]
 MULTILINE_END_CMD = ["/end"]
 QUIT_CMD = ["/bye", "/quit", "/q", "/exit"]
 WORKFLOW_CMD = ["/workflow", "/workflows", "/skill", "/skills", "/w"]
+RESPONSE_CMD = ["/response", "/res"]
 SAVE_CMD = ["/save", "/s"]
+LOAD_CMD = ["/load", "/l"]
 ATTACHMENT_CMD = ["/attachment", "/attachments", "/attach"]
 YOLO_CMD = ["/yolo"]
 HELP_CMD = ["/help", "/info"]
+RUN_CLI_CMD = ["/run", "/exec", "/execute", "/cmd", "/cli", "!"]
+SESSION_CMD = ["/session", "/conversation", "/convo"]
+
 ADD_SUB_CMD = ["add"]
 SET_SUB_CMD = ["set"]
 CLEAR_SUB_CMD = ["clear"]
-
+SAVE_SUB_CMD = ["save"]
+LOAD_SUB_CMD = ["load"]
 
 # Command display constants
 MULTILINE_START_CMD_DESC = "Start multiline input"
@@ -35,14 +41,20 @@ MULTILINE_END_CMD_DESC = "End multiline input"
 QUIT_CMD_DESC = "Quit from chat session"
 WORKFLOW_CMD_DESC = "Show active workflows"
 WORKFLOW_ADD_SUB_CMD_DESC = (
-    "Add active workflow "
-    f"(e.g., `{WORKFLOW_CMD[0]} {ADD_SUB_CMD[0]} coding,researching`)"
+    "Add active workflow " f"(e.g., `{WORKFLOW_CMD[0]} {ADD_SUB_CMD[0]} coding`)"
 )
 WORKFLOW_SET_SUB_CMD_DESC = (
-    "Set active workflows "
+    "Set active workflows "
+    f"(e.g., `{WORKFLOW_CMD[0]} {SET_SUB_CMD[0]} coding,researching`)"
 )
 WORKFLOW_CLEAR_SUB_CMD_DESC = "Deactivate all workflows"
-
+RESPONSE_CMD_DESC = "Manage response"
+RESPONSE_SAVE_SUB_CMD_DESC = (
+    "Save last response to a file "
+    f"(e.g., `{RESPONSE_CMD[0]} {SAVE_SUB_CMD[0]} conclusion.md`)"
+)
+SAVE_CMD_DESC = "Save current session " f"(e.g., `{SAVE_CMD[0]} save-point`)"
+LOAD_CMD_DESC = "Load session " f"(e.g., `{LOAD_CMD[0]} save-point`)"
 ATTACHMENT_CMD_DESC = "Show current attachment"
 ATTACHMENT_ADD_SUB_CMD_DESC = (
     "Attach a file " f"(e.g., `{ATTACHMENT_CMD[0]} {ADD_SUB_CMD[0]} ./logo.png`)"
@@ -61,6 +73,7 @@ YOLO_SET_TRUE_CMD_DESC = "Activate YOLO mode for all tools"
 YOLO_SET_FALSE_CMD_DESC = "Deactivate YOLO mode for all tools"
 RUN_CLI_CMD_DESC = "Run a non-interactive CLI command"
 HELP_CMD_DESC = "Show info/help"
+SESSION_CMD_DESC = "Show current session"
 
 
 def print_current_yolo_mode(
@@ -75,7 +88,9 @@ def print_current_yolo_mode(
 
 def print_current_attachments(ctx: AnyContext, current_attachments_value: str) -> None:
     attachments_str = (
-
+        ", ".join(current_attachments_value.split(","))
+        if current_attachments_value != ""
+        else "*Not Set*"
     )
     ctx.print(render_markdown(f"📎 Current attachments: {attachments_str}"), plain=True)
     ctx.print("", plain=True)
@@ -89,7 +104,7 @@ def print_current_workflows(ctx: AnyContext, current_workflows_value: str) -> None:
         else "*No Available Workflow*"
     )
     current_workflows_str = (
-        current_workflows_value
+        ", ".join(current_workflows_value.split(","))
         if current_workflows_value != ""
         else "*No Active Workflow*"
     )
@@ -107,8 +122,10 @@ def print_current_workflows(ctx: AnyContext, current_workflows_value: str) -> None:
     ctx.print("", plain=True)
 
 
-def
-
+def handle_response_cmd(ctx: AnyContext, user_input: str, final_result: str) -> None:
+    if not is_command_match(user_input, RESPONSE_CMD, SAVE_SUB_CMD):
+        return
+    save_path = get_command_param(user_input, RESPONSE_CMD, SAVE_SUB_CMD)
     save_path = os.path.expanduser(save_path)
     if os.path.exists(save_path):
         ctx.print(
@@ -206,6 +223,59 @@ def get_new_workflows(old_workflow: str, user_input: str) -> str:
     return _normalize_comma_separated_str(old_workflow)
 
 
+def handle_session(
+    ctx: AnyContext,
+    current_session_name: str | None,
+    current_start_new: bool,
+    user_input: str,
+) -> tuple[str | None, bool]:
+    if is_command_match(user_input, SAVE_CMD):
+        save_point = get_command_param(user_input, SAVE_CMD)
+        if not save_point:
+            ctx.print(render_markdown("️⚠️ Save point name is required."), plain=True)
+            return current_session_name, current_start_new
+        if not current_session_name:
+            ctx.print(
+                render_markdown(
+                    "⚠️ No active session to save. Please start a conversation first."
+                ),
+                plain=True,
+            )
+            return current_session_name, current_start_new
+        save_point_path = os.path.join(CFG.LLM_HISTORY_DIR, "save-point", save_point)
+        write_file(save_point_path, current_session_name)
+        ctx.print(
+            render_markdown(
+                f"Session saved to save-point: {save_point} ({current_session_name})"
+            ),
+            plain=True,
+        )
+        return current_session_name, current_start_new
+    if is_command_match(user_input, LOAD_CMD):
+        save_point = get_command_param(user_input, LOAD_CMD)
+        if not save_point:
+            ctx.print(render_markdown("⚠️ Save point name is required."), plain=True)
+            return current_session_name, current_start_new
+        save_point_path = os.path.join(CFG.LLM_HISTORY_DIR, "save-point", save_point)
+        if not os.path.exists(save_point_path):
+            ctx.print(
+                render_markdown(f"⚠️ Save point '{save_point}' not found."), plain=True
+            )
+            return current_session_name, current_start_new
+        current_session_name = read_file(save_point_path).strip()
+        ctx.print(
+            render_markdown(f"Loaded session: {current_session_name}"), plain=True
+        )
+        # When loading a session, we shouldn't start a new one
+        return current_session_name, False
+    if is_command_match(user_input, SESSION_CMD):
+        ctx.print(
+            render_markdown(f"Current session: {current_session_name}"), plain=True
+        )
+        return current_session_name, current_start_new
+    return current_session_name, current_start_new
+
+
 def _normalize_comma_separated_str(comma_separated_str: str) -> str:
     return ",".join(
         [
@@ -269,7 +339,13 @@ def print_commands(ctx: AnyContext):
             WORKFLOW_SET_SUB_CMD_DESC,
         ),
         _show_subcommand(CLEAR_SUB_CMD[0], "", WORKFLOW_CLEAR_SUB_CMD_DESC),
-        _show_command(
+        _show_command(SESSION_CMD[0], SESSION_CMD_DESC),
+        _show_command(SAVE_CMD[0], SAVE_CMD_DESC),
+        _show_command(LOAD_CMD[0], LOAD_CMD_DESC),
+        _show_command(RESPONSE_CMD[0], RESPONSE_CMD_DESC),
+        _show_subcommand(
+            SAVE_SUB_CMD[0], "<file-path>", RESPONSE_SAVE_SUB_CMD_DESC
+        ),
         _show_command(YOLO_CMD[0], YOLO_CMD_DESC),
         _show_subcommand(SET_SUB_CMD[0], "true", YOLO_SET_TRUE_CMD_DESC),
         _show_subcommand(SET_SUB_CMD[0], "false", YOLO_SET_FALSE_CMD_DESC),
zrb/builtin/llm/chat_trigger.py
CHANGED
@@ -4,7 +4,9 @@ from asyncio import StreamReader
 from typing import TYPE_CHECKING, Any, Callable, Coroutine
 
 from zrb.builtin.llm.chat_completion import get_chat_completer
+from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
+from zrb.util.git import get_current_branch
 from zrb.util.run import run_async
 
 if TYPE_CHECKING:
@@ -27,10 +29,20 @@ class LLMChatTrigger:
         self._triggers.append(single_trigger)
 
     async def wait(
-        self,
+        self,
+        ctx: AnyContext,
+        reader: "PromptSession[Any] | StreamReader",
+        current_session_name: str | None,
+        is_first_time: bool,
     ) -> str:
         trigger_tasks = [
-            asyncio.create_task(
+            asyncio.create_task(
+                run_async(
+                    self._read_next_line(
+                        ctx, reader, current_session_name, is_first_time
+                    )
+                )
+            )
         ] + [asyncio.create_task(run_async(trigger(ctx))) for trigger in self._triggers]
         final_result: str = ""
         try:
@@ -47,22 +59,33 @@ class LLMChatTrigger:
         except asyncio.CancelledError:
             ctx.print("Task cancelled.", plain=True)
             final_result = "/bye"
+            for task in trigger_tasks:
+                task.cancel()
         except KeyboardInterrupt:
             ctx.print("KeyboardInterrupt detected. Exiting...", plain=True)
             final_result = "/bye"
+            for task in trigger_tasks:
+                task.cancel()
         return final_result
 
     async def _read_next_line(
-        self,
+        self,
+        ctx: AnyContext,
+        reader: "PromptSession[Any] | StreamReader",
+        current_session_name: str | None,
+        is_first_time: bool,
     ) -> str:
         """Reads one line of input using the provided reader."""
         from prompt_toolkit import PromptSession
 
         try:
             if isinstance(reader, PromptSession):
-                bottom_toolbar =
+                bottom_toolbar = await self._get_bottom_toolbar(
+                    ctx, current_session_name, is_first_time
+                )
                 return await reader.prompt_async(
-                    completer=get_chat_completer(),
+                    completer=get_chat_completer(),
+                    bottom_toolbar=bottom_toolbar,
                 )
             line_bytes = await reader.readline()
             if not line_bytes:
@@ -74,5 +97,69 @@ class LLMChatTrigger:
             ctx.print("KeyboardInterrupt detected. Exiting...", plain=True)
             return "/bye"
 
+    async def _get_bottom_toolbar(
+        self, ctx: AnyContext, current_session_name: str | None, is_first_time: bool
+    ) -> str:
+        import shutil
+
+        terminal_width = shutil.get_terminal_size().columns
+        previous_session_name = self._get_previous_session_name(
+            ctx, current_session_name, is_first_time
+        )
+        current_branch = await self._get_current_branch()
+        current_model = self._get_current_model(ctx)
+        left_text = f"📌 {os.getcwd()} ({current_branch}) | 🧠 {current_model}"
+        right_text = f"📚 Previous Session: {previous_session_name}"
+        padding = (
+            terminal_width
+            - self._get_display_width(left_text)
+            - self._get_display_width(right_text)
+            - 1
+        )
+        if padding > 0:
+            return f"{left_text}{' ' * padding}{right_text}"
+        return f"{left_text} {right_text}"
+
+    def _get_display_width(self, text: str) -> int:
+        import unicodedata
+
+        width = 0
+        for char in text:
+            eaw = unicodedata.east_asian_width(char)
+            if eaw in ("F", "W"):  # Fullwidth or Wide
+                width += 2
+            elif eaw == "A":  # Ambiguous
+                width += 1  # Usually 1 in non-East Asian contexts
+            else:  # Narrow, Halfwidth, Neutral
+                width += 1
+        return width
+
+    def _get_current_model(self, ctx: AnyContext) -> str:
+        if "model" in ctx.input and ctx.input.model:
+            return ctx.input.model
+        return str(llm_config.default_model_name)
+
+    def _get_previous_session_name(
+        self, ctx: AnyContext, current_session_name: str | None, is_first_time: bool
+    ) -> str:
+        if is_first_time:
+            start_new: bool = ctx.input.start_new
+            if (
+                not start_new
+                and "previous_session" in ctx.input
+                and ctx.input.previous_session is not None
+            ):
+                return ctx.input.previous_session
+            return "<No Session>"
+        if not current_session_name:
+            return "<No Session>"
+        return current_session_name
+
+    async def _get_current_branch(self) -> str:
+        try:
+            return await get_current_branch(os.getcwd(), print_method=lambda x: x)
+        except Exception:
+            return "<Not a git repo>"
+
 
 llm_chat_trigger = LLMChatTrigger()
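For context on the new bottom toolbar above: it right-aligns the previous-session label by padding with spaces, using Unicode East Asian Width to estimate how many terminal cells each character occupies (emoji such as 📌 count as two cells). A minimal standalone sketch of that padding calculation, independent of zrb (the model name and paths below are made-up examples):

import shutil
import unicodedata

def display_width(text: str) -> int:
    # Fullwidth/Wide characters occupy two terminal cells; everything else one.
    return sum(2 if unicodedata.east_asian_width(c) in ("F", "W") else 1 for c in text)

left = "📌 /home/user/project (main) | 🧠 gpt-4o"
right = "📚 Previous Session: 2024-05-01T10-00-00"
cols = shutil.get_terminal_size().columns
padding = cols - display_width(left) - display_width(right) - 1
toolbar = f"{left}{' ' * padding}{right}" if padding > 0 else f"{left} {right}"
print(toolbar)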