zrb 1.8.3__py3-none-any.whl → 1.8.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
zrb/__init__.py CHANGED
@@ -34,6 +34,7 @@ from zrb.input.password_input import PasswordInput
34
34
  from zrb.input.str_input import StrInput
35
35
  from zrb.input.text_input import TextInput
36
36
  from zrb.llm_config import llm_config
37
+ from zrb.llm_rate_limitter import llm_rate_limitter
37
38
  from zrb.runner.cli import cli
38
39
  from zrb.runner.web_auth_config import web_auth_config
39
40
  from zrb.runner.web_schema.user import User
@@ -105,6 +106,7 @@ assert Scaffolder
105
106
  assert Scheduler
106
107
  assert cli
107
108
  assert llm_config
109
+ assert llm_rate_limitter
108
110
  assert Xcom
109
111
  assert web_auth_config
110
112
  assert User
@@ -33,10 +33,10 @@ async def read_user_prompt(ctx: AnyContext) -> str:
33
33
  previous_session_name=ctx.input.previous_session,
34
34
  start_new=ctx.input.start_new,
35
35
  )
36
+ if result is not None:
37
+ final_result = result
36
38
  if ctx.env.get("_ZRB_WEB_ENV", "0") != "0":
37
- # Don't run in web environment
38
- if result is not None:
39
- final_result = result
39
+ # On web environment this won't be interactive
40
40
  return final_result
41
41
  multiline_mode = False
42
42
  user_inputs = []
@@ -0,0 +1,233 @@
1
+ import os
2
+
3
+ from zrb.builtin.llm.tool.file import DEFAULT_EXCLUDED_PATTERNS, is_excluded
4
+ from zrb.builtin.llm.tool.sub_agent import create_sub_agent_tool
5
+ from zrb.context.any_context import AnyContext
6
+
7
+ _EXTRACT_INFO_SYSTEM_PROMPT = """
8
+ You are an extraction info agent.
9
+ Your goal is to help to extract relevant information to help the main LLM Agent.
10
+ You write your output is in markdown format containing path and relevant information.
11
+ Extract only information that relevant to main LLM Agent's goal.
12
+
13
+ Extracted Information format (Use this as reference, extract relevant information only):
14
+ # <file-name>
15
+ ## imports
16
+ - <imported-package>
17
+ - ...
18
+ ## variables
19
+ - <variable-type> <variable-name>: <the-purpose-of-the-variable>
20
+ - ...
21
+ ## functions
22
+ - <function-name>:
23
+ - parameters: <parameters>
24
+ - logic/description: <what-the-function-do-and-how-it-works>
25
+ ...
26
+ # <other-file-name>
27
+ ...
28
+ """.strip()
29
+
30
+
31
+ _SUMMARIZE_INFO_SYSTEM_PROMPT = """
32
+ You are an information summarization agent.
33
+ Your goal is to summarize information to help the main LLM Agent.
34
+ The summarization result should contains all necessary details
35
+ to help main LLM Agent achieve the goal.
36
+ """
37
+
38
+ _DEFAULT_EXTENSIONS = [
39
+ "py",
40
+ "go",
41
+ "java",
42
+ "ts",
43
+ "js",
44
+ "rs",
45
+ "rb",
46
+ "php",
47
+ "sh",
48
+ "bash",
49
+ "c",
50
+ "cpp",
51
+ "h",
52
+ "hpp",
53
+ "cs",
54
+ "swift",
55
+ "kt",
56
+ "scala",
57
+ "m",
58
+ "pl",
59
+ "lua",
60
+ "sql",
61
+ "html",
62
+ "css",
63
+ "scss",
64
+ "less",
65
+ "json",
66
+ "yaml",
67
+ "yml",
68
+ "toml",
69
+ "ini",
70
+ "xml",
71
+ "md",
72
+ "rst",
73
+ "txt",
74
+ ]
75
+
76
+
77
async def analyze_repo(
    ctx: AnyContext,
    path: str,
    goal: str,
    extensions: list[str] | None = None,
    exclude_patterns: list[str] | None = None,
    extraction_char_limit: int = 150000,
    summarization_char_limit: int = 150000,
) -> str:
    """
    Extract and summarize information from a directory that probably
    contains large resources.
    You should state the goal specifically so that the tool can return
    relevant information.
    Use this tool for:
    - summarization
    - outline/structure extraction
    - code review
    - create diagram as code
    - other tasks
    Args:
        path (str): File path to be analyzed. Pass exactly as provided, including '~'.
        goal (str): Goal of extracting information (for example creating C4 diagram)
        extensions (Optional[list[str]]): List of extensions to be included
            while reading resources. Defaults to common programming languages
            and config files.
        exclude_patterns (Optional[list[str]]): List of patterns to exclude from
            analysis. Common patterns like '.venv' and 'node_modules' are
            excluded by default.
        extraction_char_limit (Optional[int]): Max resource content char length
            the extraction assistant is able to handle. Defaults to 150000
        summarization_char_limit (Optional[int]): Max resource content char length
            the summarization assistant is able to handle. Defaults to 150000
    Returns:
        str: The analysis result
    Raises:
        Exception: If an error occurs.
    """
    # None-defaults instead of module-level lists: avoids exposing shared
    # mutable objects as default arguments.
    if extensions is None:
        extensions = _DEFAULT_EXTENSIONS
    if exclude_patterns is None:
        exclude_patterns = DEFAULT_EXCLUDED_PATTERNS
    abs_path = os.path.abspath(os.path.expanduser(path))
    file_metadatas = _get_file_metadatas(abs_path, extensions, exclude_patterns)
    ctx.print("Extraction")
    extracted_infos = await _extract_info(
        ctx,
        file_metadatas=file_metadatas,
        goal=goal,
        char_limit=extraction_char_limit,
    )
    ctx.print("Summarization")
    summarized_infos = await _summarize_info(
        ctx,
        extracted_infos=extracted_infos,
        goal=goal,
        char_limit=summarization_char_limit,
    )
    while len(summarized_infos) > 1:
        prev_count = len(summarized_infos)
        ctx.print("Summarization")
        summarized_infos = await _summarize_info(
            ctx,
            extracted_infos=summarized_infos,
            goal=goal,
            char_limit=summarization_char_limit,
        )
        if len(summarized_infos) >= prev_count:
            # No further reduction possible (each summary alone exceeds the
            # char limit); bail out instead of looping forever.
            break
    # Single summary in the normal case; joined leftovers in the bail-out case.
    return "\n".join(summarized_infos)
137
+
138
+
139
def _get_file_metadatas(
    dir_path: str,
    extensions: list[str],
    exclude_patterns: list[str],
) -> list[dict[str, str]]:
    """Collect {"path", "content"} entries for matching files under dir_path.

    Directories whose name matches exclude_patterns are pruned from the walk
    in-place, so excluded trees (e.g. '.venv', 'node_modules') are never
    traversed. Previously only the full file path was fnmatch-checked, which
    a bare directory-name pattern like '.venv' never matches, so excluded
    trees were silently included.
    Unreadable files are skipped with a diagnostic message.
    Results are sorted by relative path for deterministic output.
    """
    metadata_list = []
    for root, dirs, files in os.walk(dir_path):
        # Prune excluded directories in-place so os.walk skips their subtrees.
        dirs[:] = [d for d in dirs if not is_excluded(d, exclude_patterns)]
        files.sort()
        for file in files:
            if not any(file.endswith(f".{ext}") for ext in extensions):
                continue
            file_path = os.path.join(root, file)
            if is_excluded(file_path, exclude_patterns):
                continue
            try:
                with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
                    rel_path = os.path.relpath(file_path, dir_path)
                    metadata_list.append({"path": rel_path, "content": f.read()})
            except Exception as e:
                # Best-effort: report and continue with the remaining files.
                print(f"Error reading file {file_path}: {e}")
    metadata_list.sort(key=lambda m: m["path"])
    return metadata_list
161
+
162
+
163
async def _extract_info(
    ctx: AnyContext,
    file_metadatas: list[dict[str, str]],
    goal: str,
    char_limit: int,
) -> list[str]:
    """Run the extraction sub-agent over file metadata, batched by char_limit.

    Files are packed into a text buffer until adding the next one would
    exceed char_limit; each full buffer is sent to the sub-agent as one
    prompt. Returns one extraction result per batch.
    """
    extract = create_sub_agent_tool(
        tool_name="extract",
        tool_description="extract",
        system_prompt=_EXTRACT_INFO_SYSTEM_PROMPT,
    )
    results: list[str] = []

    async def _flush(buffer: str) -> None:
        # Send one accumulated batch to the sub-agent and keep its output.
        prompt = _create_extract_info_prompt(goal, buffer)
        results.append(await extract(ctx, prompt))

    buffer = ""
    for metadata in file_metadatas:
        path = metadata.get("path", "")
        content = metadata.get("content", "")
        entry = "Path: {}\nContent: {}".format(path, content)
        if len(buffer) + len(entry) > char_limit:
            if buffer:
                await _flush(buffer)
            # Start the next batch with the entry that did not fit.
            buffer = entry
        else:
            buffer += entry + "\n"
    # Flush whatever remains after the loop.
    if buffer:
        await _flush(buffer)
    return results
195
+
196
+
197
+ def _create_extract_info_prompt(goal: str, content_buffer: str) -> str:
198
+ return f"# Main LLM Agent Goal\n{goal}\n# Files\n{content_buffer}"
199
+
200
+
201
async def _summarize_info(
    ctx: AnyContext,
    extracted_infos: list[str],
    goal: str,
    char_limit: int,
) -> list[str]:
    """Run the summarization sub-agent over extracted infos, batched by char_limit.

    Infos are packed into a buffer until char_limit would be exceeded; each
    batch is summarized in one sub-agent call. Returns one summary per batch
    (more than one when the combined infos do not fit in a single batch).
    """
    summarize = create_sub_agent_tool(
        # Fixed copy-paste from _extract_info: this tool summarizes.
        tool_name="summarize",
        tool_description="summarize",
        system_prompt=_SUMMARIZE_INFO_SYSTEM_PROMPT,
    )
    summarized_infos = []
    content_buffer = ""
    for extracted_info in extracted_infos:
        if len(content_buffer) + len(extracted_info) > char_limit:
            if content_buffer:
                prompt = _create_summarize_info_prompt(goal, content_buffer)
                summarized_info = await summarize(ctx, prompt)
                summarized_infos.append(summarized_info)
            # Start the next batch with the info that did not fit.
            content_buffer = extracted_info
        else:
            content_buffer += extracted_info + "\n"

    # Process any remaining content in the buffer
    if content_buffer:
        prompt = _create_summarize_info_prompt(goal, content_buffer)
        summarized_info = await summarize(ctx, prompt)
        summarized_infos.append(summarized_info)
    return summarized_infos
230
+
231
+
232
+ def _create_summarize_info_prompt(goal: str, content_buffer: str) -> str:
233
+ return f"# Main LLM Agent Goal\n{goal}\n# Extracted Info\n{content_buffer}"
@@ -71,6 +71,9 @@ DEFAULT_EXCLUDED_PATTERNS = [
71
71
  "*.so",
72
72
  "*.dylib",
73
73
  "*.dll",
74
+ # Minified files
75
+ "*.min.css",
76
+ "*.min.js",
74
77
  ]
75
78
 
76
79
 
@@ -112,17 +115,17 @@ def list_files(
112
115
  d
113
116
  for d in dirs
114
117
  if (include_hidden or not _is_hidden(d))
115
- and not _is_excluded(d, patterns_to_exclude)
118
+ and not is_excluded(d, patterns_to_exclude)
116
119
  ]
117
120
  # Process files
118
121
  for filename in files:
119
- if (
120
- include_hidden or not _is_hidden(filename)
121
- ) and not _is_excluded(filename, patterns_to_exclude):
122
+ if (include_hidden or not _is_hidden(filename)) and not is_excluded(
123
+ filename, patterns_to_exclude
124
+ ):
122
125
  full_path = os.path.join(root, filename)
123
126
  # Check rel path for patterns like '**/node_modules/*'
124
127
  rel_full_path = os.path.relpath(full_path, abs_path)
125
- is_rel_path_excluded = _is_excluded(
128
+ is_rel_path_excluded = is_excluded(
126
129
  rel_full_path, patterns_to_exclude
127
130
  )
128
131
  if not is_rel_path_excluded:
@@ -132,7 +135,7 @@ def list_files(
132
135
  for item in os.listdir(abs_path):
133
136
  full_path = os.path.join(abs_path, item)
134
137
  # Include both files and directories if not recursive
135
- if (include_hidden or not _is_hidden(item)) and not _is_excluded(
138
+ if (include_hidden or not _is_hidden(item)) and not is_excluded(
136
139
  item, patterns_to_exclude
137
140
  ):
138
141
  all_files.append(full_path)
@@ -169,7 +172,7 @@ def _is_hidden(path: str) -> bool:
169
172
  return basename.startswith(".")
170
173
 
171
174
 
172
- def _is_excluded(name: str, patterns: list[str]) -> bool:
175
+ def is_excluded(name: str, patterns: list[str]) -> bool:
173
176
  """Check if a name/path matches any exclusion patterns."""
174
177
  for pattern in patterns:
175
178
  if fnmatch.fnmatch(name, pattern):
@@ -461,14 +464,14 @@ async def analyze_file(ctx: AnyContext, path: str, query: str) -> str:
461
464
  _analyze_file = create_sub_agent_tool(
462
465
  tool_name="analyze_file",
463
466
  tool_description="analyze file with LLM capability",
464
- sub_agent_system_prompt="\n".join(
467
+ system_prompt="\n".join(
465
468
  [
466
469
  "You are a file analyzer assistant",
467
470
  "Your goal is to help the main asisstant by reading file",
468
471
  "and perform necessary indepth analysis required by the main assistant",
469
472
  ]
470
473
  ),
471
- sub_agent_tools=[read_from_file, search_files],
474
+ tools=[read_from_file, search_files],
472
475
  )
473
476
  return await _analyze_file(
474
477
  ctx,
@@ -28,11 +28,11 @@ else:
28
28
  def create_sub_agent_tool(
29
29
  tool_name: str,
30
30
  tool_description: str,
31
- sub_agent_system_prompt: str | None = None, # Make optional
32
- sub_agent_model: str | Model | None = None,
33
- sub_agent_model_settings: ModelSettings | None = None,
34
- sub_agent_tools: list[ToolOrCallable] = [],
35
- sub_agent_mcp_servers: list[MCPServer] = [],
31
+ system_prompt: str | None = None,
32
+ model: str | Model | None = None,
33
+ model_settings: ModelSettings | None = None,
34
+ tools: list[ToolOrCallable] = [],
35
+ mcp_servers: list[MCPServer] = [],
36
36
  ) -> Callable[[AnyContext, str], str]:
37
37
  """
38
38
  Create an LLM "sub-agent" tool function for use by a main LLM agent.
@@ -63,7 +63,7 @@ def create_sub_agent_tool(
63
63
  # Resolve parameters, falling back to llm_config defaults if None
64
64
  resolved_model = get_model(
65
65
  ctx=ctx,
66
- model_attr=sub_agent_model,
66
+ model_attr=model,
67
67
  render_model=True, # Assuming we always want to render model string attributes
68
68
  model_base_url_attr=None,
69
69
  # Sub-agent tool doesn't have separate base_url/api_key params
@@ -73,10 +73,10 @@ def create_sub_agent_tool(
73
73
  )
74
74
  resolved_model_settings = get_model_settings(
75
75
  ctx=ctx,
76
- model_settings_attr=sub_agent_model_settings,
76
+ model_settings_attr=model_settings,
77
77
  )
78
78
 
79
- if sub_agent_system_prompt is None:
79
+ if system_prompt is None:
80
80
  resolved_system_prompt = get_combined_system_prompt(
81
81
  ctx=ctx,
82
82
  persona_attr=None,
@@ -87,20 +87,16 @@ def create_sub_agent_tool(
87
87
  render_special_instruction_prompt=False,
88
88
  )
89
89
  else:
90
- resolved_system_prompt = sub_agent_system_prompt
91
-
90
+ resolved_system_prompt = system_prompt
92
91
  # Create the sub-agent instance
93
92
  sub_agent_agent = create_agent_instance(
94
93
  ctx=ctx,
95
94
  model=resolved_model,
96
95
  system_prompt=resolved_system_prompt,
97
96
  model_settings=resolved_model_settings,
98
- tools_attr=sub_agent_tools, # Pass tools from factory closure
99
- additional_tools=[], # No additional tools added after factory creation
100
- mcp_servers_attr=sub_agent_mcp_servers, # Pass servers from factory closure
101
- additional_mcp_servers=[], # No additional servers added after factory creation
97
+ tools=tools,
98
+ mcp_servers=mcp_servers,
102
99
  )
103
-
104
100
  # Run the sub-agent iteration
105
101
  # Start with an empty history for the sub-agent
106
102
  sub_agent_run = await run_agent_iteration(
zrb/callback/callback.py CHANGED
@@ -1,3 +1,4 @@
1
+ import traceback
1
2
  from typing import Any
2
3
 
3
4
  from zrb.attr.type import StrDictAttr
@@ -5,6 +6,7 @@ from zrb.callback.any_callback import AnyCallback
5
6
  from zrb.session.any_session import AnySession
6
7
  from zrb.task.any_task import AnyTask
7
8
  from zrb.util.attr import get_str_dict_attr
9
+ from zrb.util.cli.style import stylize_faint
8
10
  from zrb.util.string.conversion import to_snake_case
9
11
  from zrb.xcom.xcom import Xcom
10
12
 
@@ -67,6 +69,8 @@ class Callback(AnyCallback):
67
69
  self._maybe_publish_result_to_parent_session(parent_session, result)
68
70
  return result
69
71
  except BaseException as e:
72
+ ctx = session.get_ctx(self._task)
73
+ ctx.print(traceback.format_exc())
70
74
  self._maybe_publish_error_to_parent_session(parent_session, e)
71
75
 
72
76
  def _maybe_publish_session_name_to_parent_session(
zrb/config.py CHANGED
@@ -237,6 +237,26 @@ class Config:
237
237
  def LLM_SUMMARIZATION_PROMPT(self) -> str | None:
238
238
  return os.getenv("ZRB_LLM_SUMMARIZATION_PROMPT", None)
239
239
 
240
+ @property
241
+ def LLM_MAX_REQUESTS_PER_MINUTE(self) -> int:
242
+ """Maximum number of LLM requests allowed per minute."""
243
+ return int(os.getenv("LLM_MAX_REQUESTS_PER_MINUTE", "60"))
244
+
245
+ @property
246
+ def LLM_MAX_TOKENS_PER_MINUTE(self) -> int:
247
+ """Maximum number of LLM tokens allowed per minute."""
248
+ return int(os.getenv("LLM_MAX_TOKENS_PER_MINUTE", "120000"))
249
+
250
+ @property
251
+ def LLM_MAX_TOKENS_PER_REQUEST(self) -> int:
252
+ """Maximum number of tokens allowed per individual LLM request."""
253
+ return int(os.getenv("LLM_MAX_TOKENS_PER_REQUEST", "4096"))
254
+
255
+ @property
256
+ def LLM_THROTTLE_SLEEP(self) -> float:
257
+ """Number of seconds to sleep when throttling is required."""
258
+ return float(os.getenv("LLM_THROTTLE_SLEEP", "1.0"))
259
+
240
260
  @property
241
261
  def LLM_CONTEXT_ENRICHMENT_PROMPT(self) -> str | None:
242
262
  return os.getenv("ZRB_LLM_CONTEXT_ENRICHMENT_PROMPT", None)
@@ -0,0 +1,105 @@
1
+ import asyncio
2
+ import time
3
+ from collections import deque
4
+ from typing import Callable
5
+
6
+ from zrb.config import CFG
7
+
8
+
9
+ class LLMRateLimiter:
10
+ """
11
+ Helper class to enforce LLM API rate limits and throttling.
12
+ Tracks requests and tokens in a rolling 60-second window.
13
+ """
14
+
15
+ def __init__(
16
+ self,
17
+ max_requests_per_minute: int | None = None,
18
+ max_tokens_per_minute: int | None = None,
19
+ max_tokens_per_request: int | None = None,
20
+ throttle_sleep: float | None = None,
21
+ token_counter_fn: Callable[[str], int] | None = None,
22
+ ):
23
+ self._max_requests_per_minute = max_requests_per_minute
24
+ self._max_tokens_per_minute = max_tokens_per_minute
25
+ self._max_tokens_per_request = max_tokens_per_request
26
+ self._throttle_sleep = throttle_sleep
27
+ self._token_counter_fn = token_counter_fn
28
+ self.request_times = deque()
29
+ self.token_times = deque()
30
+
31
+ @property
32
+ def max_requests_per_minute(self) -> int:
33
+ if self._max_requests_per_minute is not None:
34
+ return self._max_requests_per_minute
35
+ return CFG.LLM_MAX_REQUESTS_PER_MINUTE
36
+
37
+ @property
38
+ def max_tokens_per_minute(self) -> int:
39
+ if self._max_tokens_per_minute is not None:
40
+ return self._max_tokens_per_minute
41
+ return CFG.LLM_MAX_TOKENS_PER_MINUTE
42
+
43
+ @property
44
+ def max_tokens_per_request(self) -> int:
45
+ if self._max_tokens_per_request is not None:
46
+ return self._max_tokens_per_request
47
+ return CFG.LLM_MAX_TOKENS_PER_REQUEST
48
+
49
+ @property
50
+ def throttle_sleep(self) -> float:
51
+ if self._throttle_sleep is not None:
52
+ return self._throttle_sleep
53
+ return CFG.LLM_THROTTLE_SLEEP
54
+
55
+ @property
56
+ def token_counter_fn(self) -> Callable[[str], int]:
57
+ if self._token_counter_fn is not None:
58
+ return self._token_counter_fn
59
+ return lambda x: len(x.split())
60
+
61
+ def set_max_requests_per_minute(self, value: int):
62
+ self._max_requests_per_minute = value
63
+
64
+ def set_max_tokens_per_minute(self, value: int):
65
+ self._max_tokens_per_minute = value
66
+
67
+ def set_max_tokens_per_request(self, value: int):
68
+ self._max_tokens_per_request = value
69
+
70
+ def set_throttle_sleep(self, value: float):
71
+ self._throttle_sleep = value
72
+
73
+ def set_token_counter_fn(self, fn: Callable[[str], int]):
74
+ self._token_counter_fn = fn
75
+
76
+ async def throttle(self, prompt: str):
77
+ now = time.time()
78
+ tokens = self.token_counter_fn(prompt)
79
+ # Clean up old entries
80
+ while self.request_times and now - self.request_times[0] > 60:
81
+ self.request_times.popleft()
82
+ while self.token_times and now - self.token_times[0][0] > 60:
83
+ self.token_times.popleft()
84
+ # Check per-request token limit
85
+ if tokens > self.max_tokens_per_request:
86
+ raise ValueError(
87
+ f"Request exceeds max_tokens_per_request ({self.max_tokens_per_request})."
88
+ )
89
+ # Wait if over per-minute request or token limit
90
+ while (
91
+ len(self.request_times) >= self.max_requests_per_minute
92
+ or sum(t for _, t in self.token_times) + tokens > self.max_tokens_per_minute
93
+ ):
94
+ await asyncio.sleep(self.throttle_sleep)
95
+ now = time.time()
96
+ while self.request_times and now - self.request_times[0] > 60:
97
+ self.request_times.popleft()
98
+ while self.token_times and now - self.token_times[0][0] > 60:
99
+ self.token_times.popleft()
100
+ # Record this request
101
+ self.request_times.append(now)
102
+ self.token_times.append((now, tokens))
103
+
104
+
105
# Module-level singleton used as the default rate limiter for zrb LLM calls.
# NOTE: "limitter" is the project-wide spelling; it is part of the public API.
llm_rate_limitter = LLMRateLimiter()
zrb/session/session.py CHANGED
@@ -186,17 +186,16 @@ class Session(AnySession):
186
186
 
187
187
  def defer_monitoring(self, task: AnyTask, coro: Coroutine):
188
188
  self._register_single_task(task)
189
- self._monitoring_coros[task] = asyncio.create_task(coro)
189
+ self._monitoring_coros[task] = coro
190
190
 
191
191
  def defer_action(self, task: AnyTask, coro: Coroutine):
192
192
  self._register_single_task(task)
193
- self._action_coros[task] = asyncio.create_task(coro)
193
+ self._action_coros[task] = coro
194
194
 
195
195
  def defer_coro(self, coro: Coroutine):
196
- task = asyncio.create_task(coro)
197
- self._coros.append(task)
196
+ self._coros.append(coro)
198
197
  self._coros = [
199
- existing_task for existing_task in self._coros if not existing_task.done()
198
+ existing_coro for existing_coro in self._coros if not existing_coro.done()
200
199
  ]
201
200
 
202
201
  async def wait_deferred(self):
zrb/task/llm/agent.py CHANGED
@@ -18,6 +18,7 @@ else:
18
18
 
19
19
  from zrb.context.any_context import AnyContext
20
20
  from zrb.context.any_shared_context import AnySharedContext
21
+ from zrb.llm_rate_limitter import LLMRateLimiter, llm_rate_limitter
21
22
  from zrb.task.llm.error import extract_api_error_details
22
23
  from zrb.task.llm.print_node import print_node
23
24
  from zrb.task.llm.tool_wrapper import wrap_tool
@@ -28,42 +29,32 @@ ToolOrCallable = Tool | Callable
28
29
 
29
30
def create_agent_instance(
    ctx: AnyContext,
    model: str | Model | None = None,
    system_prompt: str = "",
    model_settings: ModelSettings | None = None,
    tools: list[ToolOrCallable] | None = None,
    mcp_servers: list[MCPServer] | None = None,
    retries: int = 3,
) -> Agent:
    """Creates a new Agent instance with configured tools and servers.

    Args:
        ctx: Context passed to wrap_tool for plain-callable tools.
        model: Model instance or name; None lets the Agent use its default.
        system_prompt: System prompt for the agent.
        model_settings: Optional model settings forwarded to the Agent.
        tools: Tool instances or plain callables (wrapped via wrap_tool).
            Defaults to no tools.
        mcp_servers: MCP servers for the agent. Defaults to none.
        retries: Number of retries for the agent.
    """
    from pydantic_ai import Agent, Tool

    # None-defaults instead of `[]` avoid the shared-mutable-default pitfall.
    tools = [] if tools is None else tools
    mcp_servers = [] if mcp_servers is None else mcp_servers
    # Normalize tools: Tool instances pass through, callables get wrapped
    # with ctx.
    tool_list = []
    for tool_or_callable in tools:
        if isinstance(tool_or_callable, Tool):
            tool_list.append(tool_or_callable)
        else:
            tool_list.append(wrap_tool(tool_or_callable, ctx))
    # Return Agent
    return Agent(
        model=model,
        system_prompt=system_prompt,
        tools=tool_list,
        mcp_servers=mcp_servers,
        model_settings=model_settings,
        retries=retries,
    )
68
59
 
69
60
 
@@ -79,10 +70,12 @@ def get_agent(
79
70
  additional_tools: list[ToolOrCallable],
80
71
  mcp_servers_attr: list[MCPServer] | Callable[[AnySharedContext], list[MCPServer]],
81
72
  additional_mcp_servers: list[MCPServer],
73
+ retries: int = 3,
82
74
  ) -> Agent:
83
75
  """Retrieves the configured Agent instance or creates one if necessary."""
84
76
  from pydantic_ai import Agent
85
77
 
78
+ # Render agent instance and return if agent_attr is already an agent
86
79
  if isinstance(agent_attr, Agent):
87
80
  return agent_attr
88
81
  if callable(agent_attr):
@@ -94,16 +87,23 @@ def get_agent(
94
87
  )
95
88
  raise TypeError(err_msg)
96
89
  return agent_instance
90
+ # Get tools for agent
91
+ tools = list(tools_attr(ctx) if callable(tools_attr) else tools_attr)
92
+ tools.extend(additional_tools)
93
+ # Get MCP Servers for agent
94
+ mcp_servers = list(
95
+ mcp_servers_attr(ctx) if callable(mcp_servers_attr) else mcp_servers_attr
96
+ )
97
+ mcp_servers.extend(additional_mcp_servers)
97
98
  # If no agent provided, create one using the configuration
98
99
  return create_agent_instance(
99
100
  ctx=ctx,
100
101
  model=model,
101
102
  system_prompt=system_prompt,
103
+ tools=tools,
104
+ mcp_servers=mcp_servers,
102
105
  model_settings=model_settings,
103
- tools_attr=tools_attr,
104
- additional_tools=additional_tools,
105
- mcp_servers_attr=mcp_servers_attr,
106
- additional_mcp_servers=additional_mcp_servers,
106
+ retries=retries,
107
107
  )
108
108
 
109
109
 
@@ -112,6 +112,7 @@ async def run_agent_iteration(
112
112
  agent: Agent,
113
113
  user_prompt: str,
114
114
  history_list: ListOfDict,
115
+ rate_limitter: LLMRateLimiter | None = None,
115
116
  ) -> AgentRun:
116
117
  """
117
118
  Runs a single iteration of the agent execution loop.
@@ -131,6 +132,11 @@ async def run_agent_iteration(
131
132
  from openai import APIError
132
133
  from pydantic_ai.messages import ModelMessagesTypeAdapter
133
134
 
135
+ if rate_limitter:
136
+ await rate_limitter.throttle(user_prompt)
137
+ else:
138
+ await llm_rate_limitter.throttle(user_prompt)
139
+
134
140
  async with agent.run_mcp_servers():
135
141
  async with agent.iter(
136
142
  user_prompt=user_prompt,
zrb/task/llm/config.py CHANGED
@@ -18,7 +18,7 @@ def get_model_settings(
18
18
  ctx: AnyContext,
19
19
  model_settings_attr: (
20
20
  ModelSettings | Callable[[AnySharedContext], ModelSettings] | None
21
- ),
21
+ ) = None,
22
22
  ) -> ModelSettings | None:
23
23
  """Gets the model settings, resolving callables if necessary."""
24
24
  model_settings = get_attr(ctx, model_settings_attr, None, auto_render=False)
@@ -29,8 +29,8 @@ def get_model_settings(
29
29
 
30
30
  def get_model_base_url(
31
31
  ctx: AnyContext,
32
- model_base_url_attr: StrAttr | None,
33
- render_model_base_url: bool,
32
+ model_base_url_attr: StrAttr | None = None,
33
+ render_model_base_url: bool = True,
34
34
  ) -> str | None:
35
35
  """Gets the model base URL, rendering if configured."""
36
36
  base_url = get_attr(
@@ -45,8 +45,8 @@ def get_model_base_url(
45
45
 
46
46
  def get_model_api_key(
47
47
  ctx: AnyContext,
48
- model_api_key_attr: StrAttr | None,
49
- render_model_api_key: bool,
48
+ model_api_key_attr: StrAttr | None = None,
49
+ render_model_api_key: bool = True,
50
50
  ) -> str | None:
51
51
  """Gets the model API key, rendering if configured."""
52
52
  api_key = get_attr(ctx, model_api_key_attr, None, auto_render=render_model_api_key)
@@ -61,10 +61,10 @@ def get_model(
61
61
  ctx: AnyContext,
62
62
  model_attr: Callable[[AnySharedContext], Model | str | fstring] | Model | None,
63
63
  render_model: bool,
64
- model_base_url_attr: StrAttr | None,
65
- render_model_base_url: bool,
66
- model_api_key_attr: StrAttr | None,
67
- render_model_api_key: bool,
64
+ model_base_url_attr: StrAttr | None = None,
65
+ render_model_base_url: bool = True,
66
+ model_api_key_attr: StrAttr | None = None,
67
+ render_model_api_key: bool = True,
68
68
  ) -> str | Model | None:
69
69
  """Gets the model instance or name, handling defaults and configuration."""
70
70
  from pydantic_ai.models import Model
@@ -7,6 +7,7 @@ from pydantic import BaseModel
7
7
  from zrb.attr.type import BoolAttr, IntAttr
8
8
  from zrb.context.any_context import AnyContext
9
9
  from zrb.llm_config import llm_config
10
+ from zrb.llm_rate_limitter import LLMRateLimiter
10
11
  from zrb.task.llm.agent import run_agent_iteration
11
12
  from zrb.task.llm.history import (
12
13
  count_part_in_history_list,
@@ -29,7 +30,7 @@ class EnrichmentConfig(BaseModel):
29
30
  model: Model | str | None = None
30
31
  settings: ModelSettings | None = None
31
32
  prompt: str
32
- retries: int = 1
33
+ retries: int = 3
33
34
 
34
35
 
35
36
  class EnrichmentResult(BaseModel):
@@ -41,6 +42,7 @@ async def enrich_context(
41
42
  config: EnrichmentConfig,
42
43
  conversation_context: dict[str, Any],
43
44
  history_list: ListOfDict,
45
+ rate_limitter: LLMRateLimiter | None = None,
44
46
  ) -> dict[str, Any]:
45
47
  """Runs an LLM call to extract key info and merge it into the context."""
46
48
  from pydantic_ai import Agent
@@ -74,10 +76,7 @@ async def enrich_context(
74
76
 
75
77
  enrichment_agent = Agent(
76
78
  model=config.model,
77
- # System prompt is part of the user prompt for this specific call
78
79
  system_prompt=config.prompt, # Use the main prompt as system prompt
79
- tools=[],
80
- mcp_servers=[],
81
80
  model_settings=config.settings,
82
81
  retries=config.retries,
83
82
  output_type=EnrichmentResult,
@@ -90,6 +89,7 @@ async def enrich_context(
90
89
  agent=enrichment_agent,
91
90
  user_prompt=user_prompt_data, # Pass the formatted data as user prompt
92
91
  history_list=[], # Enrichment agent doesn't need prior history itself
92
+ rate_limitter=rate_limitter,
93
93
  )
94
94
  if enrichment_run and enrichment_run.result.output:
95
95
  response = enrichment_run.result.output.response
@@ -173,6 +173,7 @@ async def maybe_enrich_context(
173
173
  model: str | Model | None,
174
174
  model_settings: ModelSettings | None,
175
175
  context_enrichment_prompt: str,
176
+ rate_limitter: LLMRateLimiter | None = None,
176
177
  ) -> dict[str, Any]:
177
178
  """Enriches context based on history if enabled and threshold met."""
178
179
  shorten_history_list = replace_system_prompt_in_history_list(history_list)
@@ -193,5 +194,6 @@ async def maybe_enrich_context(
193
194
  ),
194
195
  conversation_context=conversation_context,
195
196
  history_list=shorten_history_list,
197
+ rate_limitter=rate_limitter,
196
198
  )
197
199
  return conversation_context
@@ -6,6 +6,7 @@ from pydantic import BaseModel
6
6
  from zrb.attr.type import BoolAttr, IntAttr
7
7
  from zrb.context.any_context import AnyContext
8
8
  from zrb.llm_config import llm_config
9
+ from zrb.llm_rate_limitter import LLMRateLimiter
9
10
  from zrb.task.llm.agent import run_agent_iteration
10
11
  from zrb.task.llm.history import (
11
12
  count_part_in_history_list,
@@ -78,7 +79,7 @@ class SummarizationConfig(BaseModel):
78
79
  model: Model | str | None = None
79
80
  settings: ModelSettings | None = None
80
81
  prompt: str
81
- retries: int = 1
82
+ retries: int = 3
82
83
 
83
84
 
84
85
  async def summarize_history(
@@ -86,6 +87,7 @@ async def summarize_history(
86
87
  config: SummarizationConfig,
87
88
  conversation_context: dict[str, Any],
88
89
  history_list: ListOfDict,
90
+ rate_limitter: LLMRateLimiter | None = None,
89
91
  ) -> dict[str, Any]:
90
92
  """Runs an LLM call to summarize history and update the context."""
91
93
  from pydantic_ai import Agent
@@ -95,8 +97,6 @@ async def summarize_history(
95
97
  summarization_agent = Agent(
96
98
  model=config.model,
97
99
  system_prompt=config.prompt,
98
- tools=[], # No tools needed for summarization
99
- mcp_servers=[],
100
100
  model_settings=config.settings,
101
101
  retries=config.retries,
102
102
  )
@@ -122,6 +122,7 @@ async def summarize_history(
122
122
  agent=summarization_agent,
123
123
  user_prompt=summarization_user_prompt,
124
124
  history_list=[], # Summarization agent doesn't need prior history
125
+ rate_limitter=rate_limitter,
125
126
  )
126
127
  if summary_run and summary_run.result.output:
127
128
  summary_text = str(summary_run.result.output)
@@ -150,6 +151,7 @@ async def maybe_summarize_history(
150
151
  model: str | Model | None,
151
152
  model_settings: ModelSettings | None,
152
153
  summarization_prompt: str,
154
+ rate_limitter: LLMRateLimiter | None = None,
153
155
  ) -> tuple[ListOfDict, dict[str, Any]]:
154
156
  """Summarizes history and updates context if enabled and threshold met."""
155
157
  shorten_history_list = replace_system_prompt_in_history_list(history_list)
@@ -171,6 +173,7 @@ async def maybe_summarize_history(
171
173
  ),
172
174
  conversation_context=conversation_context,
173
175
  history_list=shorten_history_list, # Pass the full list for context
176
+ rate_limitter=rate_limitter,
174
177
  )
175
178
  # Truncate the history list after summarization
176
179
  return [], updated_context
zrb/task/llm_task.py CHANGED
@@ -19,6 +19,7 @@ from zrb.context.any_context import AnyContext
19
19
  from zrb.context.any_shared_context import AnySharedContext
20
20
  from zrb.env.any_env import AnyEnv
21
21
  from zrb.input.any_input import AnyInput
22
+ from zrb.llm_rate_limitter import LLMRateLimiter
22
23
  from zrb.task.any_task import AnyTask
23
24
  from zrb.task.base_task import BaseTask
24
25
  from zrb.task.llm.agent import get_agent, run_agent_iteration
@@ -113,6 +114,7 @@ class LLMTask(BaseTask):
113
114
  render_summarize_history: bool = True,
114
115
  history_summarization_threshold: IntAttr | None = None,
115
116
  render_history_summarization_threshold: bool = True,
117
+ rate_limitter: LLMRateLimiter | None = None,
116
118
  execute_condition: bool | str | Callable[[AnySharedContext], bool] = True,
117
119
  retries: int = 2,
118
120
  retry_period: float = 0,
@@ -175,6 +177,7 @@ class LLMTask(BaseTask):
175
177
  self._context_enrichment_threshold = context_enrichment_threshold
176
178
  self._render_context_enrichment_threshold = render_context_enrichment_threshold
177
179
  self._tools = tools
180
+ self._rate_limitter = rate_limitter
178
181
  self._additional_tools: list["ToolOrCallable"] = []
179
182
  self._mcp_servers = mcp_servers
180
183
  self._additional_mcp_servers: list["MCPServer"] = []
@@ -276,6 +279,7 @@ class LLMTask(BaseTask):
276
279
  model=model,
277
280
  model_settings=model_settings,
278
281
  context_enrichment_prompt=context_enrichment_prompt,
282
+ rate_limitter=self._rate_limitter,
279
283
  )
280
284
  # 3. Summarize history (optional, modifies history_list and context)
281
285
  history_list, conversation_context = await maybe_summarize_history(
@@ -291,6 +295,7 @@ class LLMTask(BaseTask):
291
295
  model=model,
292
296
  model_settings=model_settings,
293
297
  summarization_prompt=summarization_prompt,
298
+ rate_limitter=self._rate_limitter,
294
299
  )
295
300
  # 4. Build the final user prompt and system prompt
296
301
  final_user_prompt, default_context = extract_default_context(user_message)
@@ -333,6 +338,7 @@ class LLMTask(BaseTask):
333
338
  agent=agent,
334
339
  user_prompt=user_prompt,
335
340
  history_list=history_list,
341
+ rate_limitter=self._rate_limitter,
336
342
  )
337
343
  if agent_run and agent_run.result:
338
344
  new_history_list = json.loads(agent_run.result.all_messages_json())
zrb/util/run.py CHANGED
@@ -13,6 +13,8 @@ async def run_async(value: Any) -> Any:
13
13
  Returns:
14
14
  Any: The result of the awaited value or the value itself if not awaitable.
15
15
  """
16
+ if isinstance(value, asyncio.Task):
17
+ return value
16
18
  if inspect.isawaitable(value):
17
19
  return await value
18
20
  return await asyncio.to_thread(lambda: value)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: zrb
3
- Version: 1.8.3
3
+ Version: 1.8.5
4
4
  Summary: Your Automation Powerhouse
5
5
  Home-page: https://github.com/state-alchemists/zrb
6
6
  License: AGPL-3.0-or-later
@@ -30,7 +30,7 @@ Requires-Dist: pydantic-ai (>=0.2.7,<0.3.0)
30
30
  Requires-Dist: pyjwt (>=2.10.1,<3.0.0)
31
31
  Requires-Dist: python-dotenv (>=1.1.0,<2.0.0)
32
32
  Requires-Dist: python-jose[cryptography] (>=3.4.0,<4.0.0)
33
- Requires-Dist: requests (>=2.32.3,<3.0.0)
33
+ Requires-Dist: requests (>=2.32.4,<3.0.0)
34
34
  Requires-Dist: ulid-py (>=1.1.0,<2.0.0)
35
35
  Project-URL: Documentation, https://github.com/state-alchemists/zrb
36
36
  Project-URL: Repository, https://github.com/state-alchemists/zrb
@@ -1,4 +1,4 @@
1
- zrb/__init__.py,sha256=e0fZglzFsjO-jz0HhHaBV5Vm0e3MZJBtXcrgDOPESB0,3103
1
+ zrb/__init__.py,sha256=6q6sd4KnIwerCPOjv0ouyoXYlmnCfzT87KApckucp9M,3180
2
2
  zrb/__main__.py,sha256=aeIpBjlLef8bfdp0CYumnn5jVkHDPS5bwAxfuCJVUNI,2650
3
3
  zrb/attr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
4
  zrb/attr/type.py,sha256=4TV5gPYMMrKh5V-yB6iRYKCbsXAH_AvGXMsjxKLHcUs,568
@@ -9,7 +9,7 @@ zrb/builtin/git_subtree.py,sha256=7BKwOkVTWDrR0DXXQ4iJyHqeR6sV5VYRt8y_rEB0EHg,35
9
9
  zrb/builtin/group.py,sha256=t008xLM4_fgbjfZrPoi_fQAnSHIo6MOiQSCHBO4GDYU,2379
10
10
  zrb/builtin/http.py,sha256=sLqEczuSxGYXWzyJR6frGOHkPTviu4BeyroUr3-ZuAI,4322
11
11
  zrb/builtin/jwt.py,sha256=3M5uaQhJZbKQLjTUft1OwPz_JxtmK-xtkjxWjciOQho,2859
12
- zrb/builtin/llm/chat_session.py,sha256=HqFwrE1DiSlJrR-S3LRYWQBHkVsD-sfAV8_IIbnmtqY,6631
12
+ zrb/builtin/llm/chat_session.py,sha256=ot2ss6yA4qIINg0nl3KJYnLag8H0eB9ggAgRGEUkZdE,6639
13
13
  zrb/builtin/llm/history.py,sha256=cnkOyO43uiMQ9cEvmqk-pPoCk1zCAH_fwAqSgBtsjzY,3079
14
14
  zrb/builtin/llm/input.py,sha256=Nw-26uTWp2QhUgKJcP_IMHmtk-b542CCSQ_vCOjhvhM,877
15
15
  zrb/builtin/llm/llm_ask.py,sha256=TIHpZFofbehJO1LXbsi4O84kHF85Xfqjev6UkI1RiL0,4367
@@ -17,9 +17,10 @@ zrb/builtin/llm/previous-session.js,sha256=xMKZvJoAbrwiyHS0OoPrWuaKxWYLoyR5sgueP
17
17
  zrb/builtin/llm/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
18
18
  zrb/builtin/llm/tool/api.py,sha256=yR9I0ZsI96OeQl9pgwORMASVuXsAL0a89D_iPS4C8Dc,1699
19
19
  zrb/builtin/llm/tool/cli.py,sha256=_CNEmEc6K2Z0i9ppYeM7jGpqaEdT3uxaWQatmxP3jKE,858
20
- zrb/builtin/llm/tool/file.py,sha256=nCY74VtruTr9LgAq5mroSr4zF0g6LA_uXMI5sh9c8OE,17909
20
+ zrb/builtin/llm/tool/code.py,sha256=X2wEHJlYUcJXhQUrpZ-WXr2BZFS6gXvbv18dEoeHzf4,7408
21
+ zrb/builtin/llm/tool/file.py,sha256=-lw-_ts5UyrbfWL-LKGIelljfvU7D7MTL-C0jfNMxpY,17938
21
22
  zrb/builtin/llm/tool/rag.py,sha256=yqx7vXXyrOCJjhQJl4s0TnLL-2uQUTuKRnkWlSQBW0M,7883
22
- zrb/builtin/llm/tool/sub_agent.py,sha256=7n14KzUSFe5Bjf2lpluKlLyL-b1Mehj2QekkuDzo0ik,5091
23
+ zrb/builtin/llm/tool/sub_agent.py,sha256=_ItDE5MV_RZtnY_-IUsSMmm6mYaDY3YRINT0hVNsGkA,4702
23
24
  zrb/builtin/llm/tool/web.py,sha256=pXRLhcB_Y6z-2w4C4WezH8n-pg3PSMgt_bwn3aaqi6g,5479
24
25
  zrb/builtin/md5.py,sha256=690RV2LbW7wQeTFxY-lmmqTSVEEZv3XZbjEUW1Q3XpE,1480
25
26
  zrb/builtin/project/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -212,11 +213,11 @@ zrb/builtin/todo.py,sha256=pDbDKp94VHy-JsOr1sFtY8K4nIpNr1v6siqs5ptypsg,11568
212
213
  zrb/builtin/uuid.py,sha256=lIdhSGzPQ1rixRzMXxQDcgFgV7W-gUduHIudZXlzZzg,5393
213
214
  zrb/callback/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
214
215
  zrb/callback/any_callback.py,sha256=PqEJYX_RigXEmoPniSeZusZBZSLWEoVIHvHk8MZ0Mvg,253
215
- zrb/callback/callback.py,sha256=mk_RIHuWi-oP5b81jfhzU6fruhsIjhRtKpwh2yYmsiM,3876
216
+ zrb/callback/callback.py,sha256=PFhCqzfxdk6IAthmXcZ13DokT62xtBzJr_ciLw6I8Zg,4030
216
217
  zrb/cmd/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
217
218
  zrb/cmd/cmd_result.py,sha256=L8bQJzWCpcYexIxHBNsXj2pT3BtLmWex0iJSMkvimOA,597
218
219
  zrb/cmd/cmd_val.py,sha256=7Doowyg6BK3ISSGBLt-PmlhzaEkBjWWm51cED6fAUOQ,1014
219
- zrb/config.py,sha256=lpJJv5ns-bNntEpScSLptPHv9gQYvEl4M8LP6rE7zfk,9423
220
+ zrb/config.py,sha256=FsDU_SyX3NL9xX3sMvaGflKjRyNm8k-x5OYSfR6dcFw,10211
220
221
  zrb/content_transformer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
221
222
  zrb/content_transformer/any_content_transformer.py,sha256=v8ZUbcix1GGeDQwB6OKX_1TjpY__ksxWVeqibwa_iZA,850
222
223
  zrb/content_transformer/content_transformer.py,sha256=STl77wW-I69QaGzCXjvkppngYFLufow8ybPLSyAvlHs,2404
@@ -246,6 +247,7 @@ zrb/input/password_input.py,sha256=szBojWxSP9QJecgsgA87OIYwQrY2AQ3USIKdDZY6snU,1
246
247
  zrb/input/str_input.py,sha256=NevZHX9rf1g8eMatPyy-kUX3DglrVAQpzvVpKAzf7bA,81
247
248
  zrb/input/text_input.py,sha256=6T3MngWdUs0u0ZVs5Dl11w5KS7nN1RkgrIR_zKumzPM,3695
248
249
  zrb/llm_config.py,sha256=w_GSyQiJ_Q2jupej8xvQULScqPaSYICdNcH9J54W1lE,11696
250
+ zrb/llm_rate_limitter.py,sha256=RXdtPreMcmoYSE2Ab2StyHH95F0bD2pGmyySXs4gRio,3725
249
251
  zrb/runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
250
252
  zrb/runner/cli.py,sha256=AbLTNqFy5FuyGQOWOjHZGaBC8e2yuE_Dx1sBdnisR18,6984
251
253
  zrb/runner/common_util.py,sha256=JDMcwvQ8cxnv9kQrAoKVLA40Q1omfv-u5_d5MvvwHeE,1373
@@ -315,7 +317,7 @@ zrb/runner/web_util/token.py,sha256=6Yqp6mQJJMAOsSkAN-6dvtdiQbAv5xtll9jOmNYzbUY,
315
317
  zrb/runner/web_util/user.py,sha256=vE61pDjHoaHw9K0YAv1Gu2zWX2WkM2aWG-8776_aAiM,2061
316
318
  zrb/session/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
317
319
  zrb/session/any_session.py,sha256=x57mS15E-AfUjdVxwOWEzCBjW32zjer7WoeBw0guoDc,5266
318
- zrb/session/session.py,sha256=wkWwueMotpbXaMew1JKil4QMR3UbcBbx4IAZWKSFYjY,10272
320
+ zrb/session/session.py,sha256=1COMZ1JDCpkiAHxSFcaFE71GmatZ4cCHmHbpUPvt8k0,10189
319
321
  zrb/session_state_log/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
320
322
  zrb/session_state_log/session_state_log.py,sha256=VVghDMU72PbrvnzQ7MJuc-KTJ5P5fX0FYuCh3Rlwd9M,709
321
323
  zrb/session_state_logger/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -335,18 +337,18 @@ zrb/task/base_trigger.py,sha256=WSGcmBcGAZw8EzUXfmCjqJQkz8GEmi1RzogpF6A1V4s,6902
335
337
  zrb/task/cmd_task.py,sha256=irGi0txTcsvGhxjfem4_radR4csNXhgtfcxruSF1LFI,10853
336
338
  zrb/task/http_check.py,sha256=Gf5rOB2Se2EdizuN9rp65HpGmfZkGc-clIAlHmPVehs,2565
337
339
  zrb/task/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
338
- zrb/task/llm/agent.py,sha256=6wGSsw03GdY_fj12CsJh7wxB6BnE13N8RYXaWfbiUsk,5451
339
- zrb/task/llm/config.py,sha256=oGxHYMdIvhASnKwNuMPwcdeJiFfS0tNskzGHakpfpQU,3458
340
+ zrb/task/llm/agent.py,sha256=pLldTsXzdaPs07WIw6DbpLpe-y6xIPU5MAPQ0fRA8Bk,5526
341
+ zrb/task/llm/config.py,sha256=Gb0lSHCgGXOAr7igkU7k_Ew5Yp_wOTpNQyZrLrtA7oc,3521
340
342
  zrb/task/llm/context.py,sha256=U9a8lxa2ikz6my0Sd5vpO763legHrMHyvBjbrqNmv0Y,3838
341
- zrb/task/llm/context_enrichment.py,sha256=jB7lekwo4hTk8HIwhhq7HSMfgAzwaV2U867icBRt6Z0,7088
343
+ zrb/task/llm/context_enrichment.py,sha256=BlW2CjSUsKJT8EZBXYxOE4MEBbRCoO34PlQQdzA-zBM,7201
342
344
  zrb/task/llm/error.py,sha256=27DQXSG8SH1-XuvXFdZQKzP39wZDWmd_YnSTz6DJKKI,3690
343
345
  zrb/task/llm/history.py,sha256=3WMXoi7RquxosXQf3iv2_BCeF8iKtY1f407pR71xERs,7745
344
- zrb/task/llm/history_summarization.py,sha256=n3GbgwXlDIkgpJppMGfpqF_8Wpi9yAoZYh46O1pFQeU,6432
346
+ zrb/task/llm/history_summarization.py,sha256=d6RF1duVe7aog2gUf7kzQLIqTwNTfVsOtvx5629hiTU,6582
345
347
  zrb/task/llm/print_node.py,sha256=bpISOUxSH_JBLR-4Nq6-iLrzNWFagrKFX6u8ogYYMw8,4395
346
348
  zrb/task/llm/prompt.py,sha256=zBo3xT3YPX_A4_t8Cd-QjNqQZl9dsoWMTt-NdytI2f4,3827
347
349
  zrb/task/llm/tool_wrapper.py,sha256=Xygd4VCY3ykjVv63pqlTI16ZG41ySkp683_5VTnL-Zo,6481
348
350
  zrb/task/llm/typing.py,sha256=c8VAuPBw_4A3DxfYdydkgedaP-LU61W9_wj3m3CAX1E,58
349
- zrb/task/llm_task.py,sha256=Yav1pmV26Eh4h9xTh16dN-DbTvhfYINI0EDp_ptJHLg,15643
351
+ zrb/task/llm_task.py,sha256=R-VBzESfuzo4RxbtvXd3XBhHoWe8PEDiuNmKe_93wEo,15934
350
352
  zrb/task/make_task.py,sha256=PD3b_aYazthS8LHeJsLAhwKDEgdurQZpymJDKeN60u0,2265
351
353
  zrb/task/rsync_task.py,sha256=GSL9144bmp6F0EckT6m-2a1xG25AzrrWYzH4k3SVUKM,6370
352
354
  zrb/task/scaffolder.py,sha256=rME18w1HJUHXgi9eTYXx_T2G4JdqDYzBoNOkdOOo5-o,6806
@@ -380,7 +382,7 @@ zrb/util/git_subtree.py,sha256=E_UB5OIgm8WkHL9beifRxpZ25_BB9p1H578OhLZTgRU,4611
380
382
  zrb/util/group.py,sha256=T82yr3qg9I5k10VPXkMyrIRIqyfzadSH813bqzwKEPI,4718
381
383
  zrb/util/init_path.py,sha256=n4BgLGeq3mPLS1la8VEqZpqJHx0vJRe2WRwTtbw-FjE,652
382
384
  zrb/util/load.py,sha256=DK0KYSlu48HCoGPqnW1IxnE3pHrZSPCstfz8Fjyqqv8,2140
383
- zrb/util/run.py,sha256=FPRCCvl5g6GuDvHTkaV95CFDlqxQ-5FZb2-F-Jz1fnI,485
385
+ zrb/util/run.py,sha256=vu-mcSWDP_WuuvIKqM_--Gk3WkABO1oTXiHmBRTvVQk,546
384
386
  zrb/util/string/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
385
387
  zrb/util/string/conversion.py,sha256=sMmstzbrNgLvWAQukqoXz45JtsNpJrniudAtzJaQlYw,6240
386
388
  zrb/util/string/format.py,sha256=MwWGAwSdtOgR_2uz-JCXlg_q-uRYUUI-G8CGkfdgqik,1198
@@ -388,7 +390,7 @@ zrb/util/string/name.py,sha256=SXEfxJ1-tDOzHqmSV8kvepRVyMqs2XdV_vyoh_9XUu0,1584
388
390
  zrb/util/todo.py,sha256=VGISej2KQZERpornK-8X7bysp4JydMrMUTnG8B0-liI,20708
389
391
  zrb/xcom/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
390
392
  zrb/xcom/xcom.py,sha256=o79rxR9wphnShrcIushA0Qt71d_p3ZTxjNf7x9hJB78,1571
391
- zrb-1.8.3.dist-info/METADATA,sha256=VMBmGEImGJOvME1Jr6IrdYEWexUsmYcmgh8kmhE-kT4,9760
392
- zrb-1.8.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
393
- zrb-1.8.3.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
394
- zrb-1.8.3.dist-info/RECORD,,
393
+ zrb-1.8.5.dist-info/METADATA,sha256=R27kDs_HVlzKm-rSSnkk4XVbPI_dIehXERaiZgGGL_M,9760
394
+ zrb-1.8.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
395
+ zrb-1.8.5.dist-info/entry_points.txt,sha256=-Pg3ElWPfnaSM-XvXqCxEAa-wfVI6BEgcs386s8C8v8,46
396
+ zrb-1.8.5.dist-info/RECORD,,
File without changes