zrb 1.17.2__py3-none-any.whl → 1.17.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zrb/builtin/llm/tool/cli.py +5 -1
- zrb/builtin/llm/tool/file.py +8 -7
- zrb/config/config.py +5 -0
- zrb/config/llm_rate_limitter.py +42 -19
- zrb/task/llm/prompt.py +105 -43
- zrb/task/llm/tool_wrapper.py +10 -0
- {zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/METADATA +1 -1
- {zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/RECORD +10 -10
- {zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/WHEEL +0 -0
- {zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/entry_points.txt +0 -0
zrb/builtin/llm/tool/cli.py CHANGED

@@ -4,7 +4,8 @@ from typing import Any
 
 def run_shell_command(command: str) -> dict[str, Any]:
     """
-    Executes a shell command on the user's local machine and returns
+    Executes a non interactive shell command on the user's local machine and returns
+    the output.
 
     This tool is powerful and should be used for tasks that require interacting
     with the command line, such as running scripts, managing system processes,
@@ -15,6 +16,9 @@ def run_shell_command(command: str) -> dict[str, Any]:
     modify files or system state (e.g., `git`, `npm`, `pip`, `docker`), you
     MUST explain what the command does and ask the user for confirmation.
 
+    **Note:** Make sure to run any server or long running process, as background process.
+    (e.g., python -m http.server &)
+
     Args:
         command (str): The exact shell command to execute.
 
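The docstring is the only change here: the tool now advertises itself as non-interactive and asks for long-running processes to be backgrounded. A minimal sketch of the call pattern this implies (the commands are illustrative, not from the diff):

```python
from zrb.builtin.llm.tool.cli import run_shell_command

# Non-interactive command: runs to completion and returns its output.
result = run_shell_command("ls -la")

# Long-running server: backgrounded (with output redirected) so the tool
# call itself can return, per the new docstring note.
run_shell_command("python -m http.server 8000 > /tmp/http.log 2>&1 &")
```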
zrb/builtin/llm/tool/file.py CHANGED

@@ -500,10 +500,13 @@ async def analyze_file(
         tools=[read_from_file, search_files],
     )
     payload = json.dumps(
-        {
+        {
+            "instruction": query,
+            "file_path": abs_path,
+            "file_content": llm_rate_limitter.clip_prompt(file_content, token_limit),
+        }
     )
-
-    return await _analyze_file(ctx, clipped_payload)
+    return await _analyze_file(ctx, payload)
 
 
 def read_many_files(paths: list[str]) -> dict[str, str]:
@@ -593,11 +596,9 @@ def write_many_files(files: list[FileToWrite]) -> dict[str, Any]:
     errors = {}
     # 4. Access the data using dictionary key-lookup syntax.
     for file in files:
+        path = file["path"]
+        content = file["content"]
         try:
-            # Use file['path'] and file['content'] instead of file.path
-            path = file["path"]
-            content = file["content"]
-
             abs_path = os.path.abspath(os.path.expanduser(path))
             directory = os.path.dirname(abs_path)
             if directory and not os.path.exists(directory):
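In `analyze_file`, the clipping now applies to `file_content` alone, inside the dict that gets serialized, rather than to a pre-clipped whole payload (the removed code passed a `clipped_payload`). The practical effect is that the JSON envelope, the instruction, and the file path can no longer be truncated away; only the file body shrinks. A sketch with stand-in values:

```python
import json

from zrb.config.llm_rate_limitter import llm_rate_limitter

# Stand-ins for the locals analyze_file has in scope at this point.
query = "What does this file do?"
abs_path = "/tmp/example.py"
file_content = "print('hello')\n" * 10_000  # far beyond the token budget
token_limit = 1_000

payload = json.dumps(
    {
        "instruction": query,
        "file_path": abs_path,
        # Only the file body is clipped; the other keys survive intact.
        "file_content": llm_rate_limitter.clip_prompt(file_content, token_limit),
    }
)
```

The `write_many_files` hunk is smaller: hoisting the `path`/`content` lookups above the `try` means a malformed entry now raises immediately, apparently instead of being swallowed into the per-file error handling.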
zrb/config/config.py CHANGED

@@ -313,6 +313,11 @@ class Config:
         """Maximum number of tokens allowed per individual LLM request."""
         return int(self._getenv("LLM_MAX_TOKENS_PER_REQUEST", "100000"))
 
+    @property
+    def LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT(self) -> int:
+        """Maximum number of tokens allowed per tool call result."""
+        return int(self._getenv("LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT", "75000"))
+
     @property
     def LLM_THROTTLE_SLEEP(self) -> float:
         """Number of seconds to sleep when throttling is required."""
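The new property follows the same `_getenv` pattern as the surrounding `LLM_*` settings, so it can be overridden via the environment (with whatever prefix `_getenv` applies) and falls back to 75000. Reading it back:

```python
from zrb.config.config import CFG

# 75000 unless the corresponding environment variable overrides it.
print(CFG.LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT)
```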
zrb/config/llm_rate_limitter.py CHANGED

@@ -1,7 +1,8 @@
 import asyncio
+import json
 import time
 from collections import deque
-from typing import Callable
+from typing import Any, Callable
 
 from zrb.config.config import CFG
 
@@ -17,16 +18,18 @@ class LLMRateLimiter:
         max_requests_per_minute: int | None = None,
         max_tokens_per_minute: int | None = None,
         max_tokens_per_request: int | None = None,
+        max_tokens_per_tool_call_result: int | None = None,
         throttle_sleep: float | None = None,
         use_tiktoken: bool | None = None,
-
+        tiktoken_encoding_name: str | None = None,
     ):
         self._max_requests_per_minute = max_requests_per_minute
         self._max_tokens_per_minute = max_tokens_per_minute
         self._max_tokens_per_request = max_tokens_per_request
+        self._max_tokens_per_tool_call_result = max_tokens_per_tool_call_result
         self._throttle_sleep = throttle_sleep
         self._use_tiktoken = use_tiktoken
-        self._tiktoken_encoding_name =
+        self._tiktoken_encoding_name = tiktoken_encoding_name
         self.request_times = deque()
         self.token_times = deque()
 
@@ -48,6 +51,12 @@ class LLMRateLimiter:
             return self._max_tokens_per_request
         return CFG.LLM_MAX_TOKENS_PER_REQUEST
 
+    @property
+    def max_tokens_per_tool_call_result(self) -> int:
+        if self._max_tokens_per_tool_call_result is not None:
+            return self._max_tokens_per_tool_call_result
+        return CFG.LLM_MAX_TOKENS_PER_TOOL_CALL_RESULT
+
     @property
     def throttle_sleep(self) -> float:
         if self._throttle_sleep is not None:
@@ -75,48 +84,56 @@ class LLMRateLimiter:
     def set_max_tokens_per_request(self, value: int):
         self._max_tokens_per_request = value
 
+    def set_max_tokens_per_tool_call_result(self, value: int):
+        self._max_tokens_per_tool_call_result = value
+
     def set_throttle_sleep(self, value: float):
         self._throttle_sleep = value
 
-    def count_token(self, prompt: str) -> int:
+    def count_token(self, prompt: Any) -> int:
+        str_prompt = self._prompt_to_str(prompt)
         if not self.use_tiktoken:
-            return self._fallback_count_token(prompt)
+            return self._fallback_count_token(str_prompt)
         try:
             import tiktoken
 
             enc = tiktoken.get_encoding(self.tiktoken_encoding_name)
-            return len(enc.encode(prompt))
+            return len(enc.encode(str_prompt))
         except Exception:
-            return self._fallback_count_token(prompt)
+            return self._fallback_count_token(str_prompt)
 
-    def _fallback_count_token(self, prompt: str) -> int:
-        return len(prompt) // 4
+    def _fallback_count_token(self, str_prompt: str) -> int:
+        return len(str_prompt) // 4
 
-    def clip_prompt(self, prompt: str, limit: int) -> str:
+    def clip_prompt(self, prompt: Any, limit: int) -> str:
+        str_prompt = self._prompt_to_str(prompt)
         if not self.use_tiktoken:
-            return self._fallback_clip_prompt(prompt, limit)
+            return self._fallback_clip_prompt(str_prompt, limit)
         try:
             import tiktoken
 
-            enc = tiktoken.get_encoding(
-            tokens = enc.encode(prompt)
+            enc = tiktoken.get_encoding(self.tiktoken_encoding_name)
+            tokens = enc.encode(str_prompt)
             if len(tokens) <= limit:
-                return prompt
+                return str_prompt
             truncated = tokens[: limit - 3]
             clipped_text = enc.decode(truncated)
             return clipped_text + "..."
         except Exception:
-            return self._fallback_clip_prompt(prompt, limit)
+            return self._fallback_clip_prompt(str_prompt, limit)
 
-    def _fallback_clip_prompt(self, prompt: str, limit: int) -> str:
+    def _fallback_clip_prompt(self, str_prompt: str, limit: int) -> str:
         char_limit = limit * 4 if limit * 4 <= 10 else limit * 4 - 10
-        return prompt[:char_limit] + "..."
+        return str_prompt[:char_limit] + "..."
 
     async def throttle(
-        self,
+        self,
+        prompt: Any,
+        throttle_notif_callback: Callable | None = None,
     ):
         now = time.time()
-
+        str_prompt = self._prompt_to_str(prompt)
+        tokens = self.count_token(str_prompt)
         # Clean up old entries
         while self.request_times and now - self.request_times[0] > 60:
             self.request_times.popleft()
@@ -144,5 +161,11 @@ class LLMRateLimiter:
         self.request_times.append(now)
         self.token_times.append((now, tokens))
 
+    def _prompt_to_str(self, prompt: Any) -> str:
+        try:
+            return json.dumps(prompt)
+        except Exception:
+            return f"{prompt}"
+
 
 llm_rate_limitter = LLMRateLimiter()
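With `_prompt_to_str` in place, `count_token`, `clip_prompt`, and `throttle` accept arbitrary values: anything `json.dumps` can serialize is counted in its JSON form, and everything else falls back to `f"{prompt}"`. One consequence worth noting is that `clip_prompt` now always returns a string, even when handed structured data. A sketch:

```python
from zrb.config.llm_rate_limitter import llm_rate_limitter

# Structured input is serialized with json.dumps before counting.
messages = [{"role": "user", "content": "hello " * 5_000}]
print(llm_rate_limitter.count_token(messages))

# Clipping structured input yields a (possibly truncated) JSON string.
clipped = llm_rate_limitter.clip_prompt(messages, 100)
print(type(clipped), len(clipped))


# Values json.dumps rejects fall back to their f-string representation.
class Opaque:
    pass


print(llm_rate_limitter.count_token(Opaque()))
```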
zrb/task/llm/prompt.py CHANGED

@@ -2,11 +2,12 @@ import os
 import platform
 import re
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Any, Callable
 
 from zrb.attr.type import StrAttr, StrListAttr
-from zrb.config.llm_config import llm_config
+from zrb.config.llm_config import llm_config
 from zrb.config.llm_context.config import llm_context_config
+from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
 from zrb.context.any_shared_context import AnySharedContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
@@ -173,7 +174,43 @@ def get_system_and_user_prompt(
     render_modes: bool = False,
     conversation_history: ConversationHistory | None = None,
 ) -> tuple[str, str]:
-
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    modified_user_message, user_message_context = _extract_user_prompt_components(
+        user_message
+    )
+    new_system_prompt = _construct_system_prompt(
+        ctx=ctx,
+        persona_attr=persona_attr,
+        render_persona=render_persona,
+        system_prompt_attr=system_prompt_attr,
+        render_system_prompt=render_system_prompt,
+        special_instruction_prompt_attr=special_instruction_prompt_attr,
+        render_special_instruction_prompt=render_special_instruction_prompt,
+        modes_attr=modes_attr,
+        render_modes=render_modes,
+        conversation_history=conversation_history,
+    )
+    user_message_context["Token Counter"] = _get_token_usage_info(
+        new_system_prompt, user_message, conversation_history
+    )
+    return new_system_prompt, _contruct_user_message_prompt(
+        modified_user_message, user_message_context
+    )
+
+
+def _construct_system_prompt(
+    ctx: AnyContext,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    modes_attr: StrListAttr | None = None,
+    render_modes: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> str:
     persona = get_persona(ctx, persona_attr, render_persona)
     base_system_prompt = get_base_system_prompt(
         ctx, system_prompt_attr, render_system_prompt
@@ -185,10 +222,9 @@ def get_system_and_user_prompt(
     project_context_prompt = get_project_context_prompt()
     if conversation_history is None:
         conversation_history = ConversationHistory()
-
-
-
-    new_system_prompt = "\n".join(
+    current_directory = os.getcwd()
+    directory_tree = _generate_directory_tree(current_directory, max_depth=2)
+    return "\n".join(
         [
             make_prompt_section("Persona", persona),
             make_prompt_section("System Prompt", base_system_prompt),
@@ -229,10 +265,42 @@
             ),
         ),
         make_prompt_section("Project Context", project_context_prompt),
-            make_prompt_section(
+            make_prompt_section(
+                "Conversation Environment",
+                "\n".join(
+                    [
+                        make_prompt_section("Current OS", platform.system()),
+                        make_prompt_section("OS Version", platform.version()),
+                        make_prompt_section(
+                            "Python Version", platform.python_version()
+                        ),
+                        make_prompt_section(
+                            "Directory Tree (depth=2)", directory_tree, as_code=True
+                        ),
+                    ]
+                ),
+            ),
+        ]
+    )
+
+
+def _contruct_user_message_prompt(
+    modified_user_message: str, user_message_context: dict[str, str]
+) -> str:
+    return "\n".join(
+        [
+            make_prompt_section("User Message", modified_user_message),
+            make_prompt_section(
+                "Context",
+                "\n".join(
+                    [
+                        make_prompt_section(key, val)
+                        for key, val in user_message_context.items()
+                    ]
+                ),
+            ),
         ]
     )
-    return new_system_prompt, new_user_message
 
 
 def _generate_directory_tree(
@@ -258,13 +326,13 @@ def _generate_directory_tree(
 
         for i, entry in enumerate(entries):
             if i >= max_children:
-                tree_lines.append(f"{prefix}
+                tree_lines.append(f"{prefix}└─... (more)")
                 break
             is_last = i == len(entries) - 1
-            connector = "
+            connector = "└─" if is_last else "├─"
            tree_lines.append(f"{prefix}{connector}{entry.name}")
            if entry.is_dir():
-                new_prefix = prefix + ("
+                new_prefix = prefix + (" " if is_last else "│ ")
                recurse(entry.path, depth + 1, new_prefix)
 
     tree_lines.append(os.path.basename(dir_path))
@@ -272,7 +340,24 @@
     return "\n".join(tree_lines)
 
 
-def extract_conversation_environment(user_message: str) -> tuple[str, str]:
+def _get_token_usage_info(
+    system_prompt: str, user_message: str, conversation_history: ConversationHistory
+) -> str:
+    system_prompt_token_count = llm_rate_limitter.count_token(system_prompt)
+    user_message_token_count = llm_rate_limitter.count_token(user_message)
+    conversation_history_token_count = llm_rate_limitter.count_token(
+        conversation_history.history
+    )
+    total_token_count = (
+        system_prompt_token_count
+        + user_message_token_count
+        + conversation_history_token_count
+    )
+    max_token = llm_rate_limitter.max_tokens_per_request
+    return f"{total_token_count} tokens of {max_token} tokens"
+
+
+def _extract_user_prompt_components(user_message: str) -> tuple[str, dict[str, Any]]:
     modified_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
     pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
@@ -297,41 +382,18 @@ def extract_conversation_environment(user_message: str) -> tuple[str, str]:
         apendixes.append(
             make_prompt_section(
                 f"{placeholder} ({ref_type} path: `{resource_path}`)",
-                content,
+                "\n".join(content) if isinstance(content, list) else content,
                 as_code=True,
             )
         )
     current_directory = os.getcwd()
-    directory_tree = _generate_directory_tree(current_directory, max_depth=2)
-    conversation_environment = "\n".join(
-        [
-            make_prompt_section("Current OS", platform.system()),
-            make_prompt_section("OS Version", platform.version()),
-            make_prompt_section("Python Version", platform.python_version()),
-            make_prompt_section(
-                "Directory Tree (depth=2)", directory_tree, as_code=True
-            ),
-        ]
-    )
     iso_date = datetime.now(timezone.utc).astimezone().isoformat()
-
-
-
-
-
-
-            [
-                make_prompt_section(
-                    "Current working directory", current_directory
-                ),
-                make_prompt_section("Current time", iso_date),
-                make_prompt_section("Apendixes", "\n".join(apendixes)),
-            ]
-        ),
-    ),
-]
-)
-    return conversation_environment, modified_user_message
+    user_message_context = {
+        "Current Working Directory": current_directory,
+        "Current Time": iso_date,
+        "Apendixes": "\n".join(apendixes),
+    }
+    return modified_user_message, user_message_context
 
 
 def get_user_message(
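The refactor breaks the old monolithic `get_system_and_user_prompt` into `_construct_system_prompt`, `_contruct_user_message_prompt` (the misspelling is in the source), `_extract_user_prompt_components`, and `_get_token_usage_info`. Two behavioral shifts ride along: the OS/Python/directory-tree environment moves from the user message into the system prompt, and the user-message context gains a "Token Counter" entry. That entry is just the formatted string `_get_token_usage_info` builds; the same arithmetic with stand-in values:

```python
from zrb.config.llm_rate_limitter import llm_rate_limitter

system_prompt = "You are a helpful assistant."
user_message = "Summarize the latest changes."
history = []  # stand-in for ConversationHistory.history

total_token_count = (
    llm_rate_limitter.count_token(system_prompt)
    + llm_rate_limitter.count_token(user_message)
    + llm_rate_limitter.count_token(history)
)
max_token = llm_rate_limitter.max_tokens_per_request
print(f"{total_token_count} tokens of {max_token} tokens")
```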
zrb/task/llm/tool_wrapper.py CHANGED

@@ -7,6 +7,7 @@ from collections.abc import Callable
 from typing import TYPE_CHECKING, Any
 
 from zrb.config.config import CFG
+from zrb.config.llm_rate_limitter import llm_rate_limitter
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.error import ToolExecutionError
 from zrb.util.callable import get_callable_name
@@ -124,6 +125,7 @@ def _create_wrapper(
                 f"Tool execution cancelled. User disapproving: {reason}"
             )
         result = await run_async(func(*args, **kwargs))
+        _check_tool_call_result_limit(result)
         if has_ever_edited:
             return {
                 "tool_call_result": result,
@@ -145,6 +147,14 @@
     return wrapper
 
 
+def _check_tool_call_result_limit(result: Any):
+    if (
+        llm_rate_limitter.count_token(result)
+        > llm_rate_limitter.max_tokens_per_tool_call_result
+    ):
+        raise ValueError("Result value is too large, please adjust the parameter")
+
+
 async def _handle_user_response(
     ctx: AnyContext,
     func: Callable,
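`_check_tool_call_result_limit` now runs on every wrapped tool result; anything over `max_tokens_per_tool_call_result` (75000 by default) raises a `ValueError`, which presumably surfaces through the `ToolExecutionError` handling this module already imports, giving the model a chance to retry with narrower parameters. A sketch of when the guard trips:

```python
from zrb.config.llm_rate_limitter import llm_rate_limitter

limit = llm_rate_limitter.max_tokens_per_tool_call_result

# Each number costs at least one token, so this is well over budget under
# either the tiktoken counter or the len // 4 fallback.
huge_result = " ".join(str(i) for i in range(limit * 2))

if llm_rate_limitter.count_token(huge_result) > limit:
    raise ValueError("Result value is too large, please adjust the parameter")
```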
{zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/RECORD CHANGED

@@ -17,9 +17,9 @@ zrb/builtin/llm/llm_ask.py,sha256=nMrKO_j3X1ZyFWHabi8M4Fh74gDQerocni_ozvLa4_8,76
 zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sguePIoCjTY,816
 zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/builtin/llm/tool/api.py,sha256=HFs7kEUSg-tzsc-ab4h3-pT_6KjuyF1Wo-ukgo_FWr4,3081
-zrb/builtin/llm/tool/cli.py,sha256=
+zrb/builtin/llm/tool/cli.py,sha256=nqVnCcQzLc2OX7wD48YjJBLZ05FDkVyTn7hBvOhbYAM,1391
 zrb/builtin/llm/tool/code.py,sha256=-MKUpXX4jkWm4rCqrUmTTzsYhjfzKle9_XsNPtq8PNM,8952
-zrb/builtin/llm/tool/file.py,sha256=
+zrb/builtin/llm/tool/file.py,sha256=Pa-2Vy3M9EKcngPXnzA5GwSbeZYPDE94ZAuiagGZqks,23456
 zrb/builtin/llm/tool/note.py,sha256=7H1PK2NJRAF5BqVNwh6d0I27zUIKvkiyPS1xzVxlcZY,2298
 zrb/builtin/llm/tool/rag.py,sha256=aN8D8ZqzGXWCP_1F1LbN0QgfyzaK9CKrjfTPorDIYjw,9824
 zrb/builtin/llm/tool/sub_agent.py,sha256=nYluPfc8FlSobpP_4vnBIqkPARrDHq_SwKkmlh_ATUI,5067
@@ -221,7 +221,7 @@ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
 zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
 zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
-zrb/config/config.py,sha256=
+zrb/config/config.py,sha256=7eMY6n54-NVrUXSxemHrgH_f6dG2q6Hr-OdP0dBtUmU,15164
 zrb/config/default_prompt/file_extractor_system_prompt.md,sha256=dNBWy4O4mfCqGkqaRQHDo18hDCengU0IZ0vQMSDwbyY,3821
 zrb/config/default_prompt/interactive_system_prompt.md,sha256=5wE_E1WiMwbXY_jaRoQzDM9esnOxmlsD3BShGx8HyEc,2702
 zrb/config/default_prompt/persona.md,sha256=GfUJ4-Mlf_Bm1YTzxFNkPkdVbAi06ZDVYh-iIma3NOs,253
@@ -232,7 +232,7 @@ zrb/config/default_prompt/system_prompt.md,sha256=SP49gcvIQB7mxWqfQAAQUwXwYemQkg
 zrb/config/llm_config.py,sha256=1rX07YpMGDwS2MoqjdFzbi5tOrmBi8qo7YXP2rx9J9Q,12718
 zrb/config/llm_context/config.py,sha256=NXS1KNok-82VArstHmTVgrelPHSlKOWYJ6lytEyCQao,6833
 zrb/config/llm_context/config_parser.py,sha256=XrvkpbmzrrAuDhLCugriBHf2t9fSFuYxwzKehvTl9x4,1467
-zrb/config/llm_rate_limitter.py,sha256=
+zrb/config/llm_rate_limitter.py,sha256=BC6kdsnaLiCBzrUWoAxkgDLH_y0wJadjtccSV88kKCI,6162
 zrb/config/web_auth_config.py,sha256=_PXatQTYh2mX9H3HSYSQKp13zm1RlLyVIoeIr6KYMQ8,6279
 zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850
@@ -361,8 +361,8 @@ zrb/task/llm/error.py,sha256=QR-nIohS6pBpC_16cWR-fw7Mevo1sNYAiXMBsh_CJDE,4157
 zrb/task/llm/history_summarization.py,sha256=Ntk0mCr4OKeFqjgyfI2UGifNLmAZi4UbSivphXfBwMc,7493
 zrb/task/llm/history_summarization_tool.py,sha256=KjL2RPThg6zDAk9eATuXmQvyrzd1t0scCgo2Cn4-hpY,1022
 zrb/task/llm/print_node.py,sha256=Nnf4F6eDJR4PFcOqQ1jLWBTFnzNGl1Stux2DZ3SMhsY,8062
-zrb/task/llm/prompt.py,sha256=
-zrb/task/llm/tool_wrapper.py,sha256=
+zrb/task/llm/prompt.py,sha256=SZrt4y0ceZ4d7pq0RRBDBoAQHzBMV68yFxIl4h7nUmE,15630
+zrb/task/llm/tool_wrapper.py,sha256=DCqRNw-7lul1PL4E11vi5cjb6uyjZ9_vUN5KKlcf0yI,11399
 zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
 zrb/task/llm_task.py,sha256=onQlUnOIBxXa6gSurgXcpmQ4BsIxDH6G4PnuEImLZPU,15994
 zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
@@ -414,7 +414,7 @@ zrb/util/todo_model.py,sha256=hhzAX-uFl5rsg7iVX1ULlJOfBtblwQ_ieNUxBWfc-Os,1670
 zrb/util/truncate.py,sha256=eSzmjBpc1Qod3lM3M73snNbDOcARHukW_tq36dWdPvc,921
 zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
-zrb-1.17.
-zrb-1.17.
-zrb-1.17.
-zrb-1.17.
+zrb-1.17.3.dist-info/METADATA,sha256=7IcvYNF0jB3wsQ8tLDPa8_HqvJqnGPiuWzHLeQMarl0,9893
+zrb-1.17.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+zrb-1.17.3.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
+zrb-1.17.3.dist-info/RECORD,,

{zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/WHEEL
File without changes

{zrb-1.17.2.dist-info → zrb-1.17.3.dist-info}/entry_points.txt
File without changes