zrb 1.21.31__py3-none-any.whl → 1.21.43__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of zrb might be problematic.
Files changed (46)
  1. zrb/builtin/llm/chat_completion.py +94 -84
  2. zrb/builtin/llm/chat_session.py +90 -30
  3. zrb/builtin/llm/chat_session_cmd.py +115 -22
  4. zrb/builtin/llm/chat_trigger.py +92 -5
  5. zrb/builtin/llm/history.py +14 -7
  6. zrb/builtin/llm/llm_ask.py +16 -7
  7. zrb/builtin/llm/tool/cli.py +34 -15
  8. zrb/builtin/llm/tool/file.py +14 -2
  9. zrb/builtin/llm/tool/search/brave.py +8 -2
  10. zrb/builtin/llm/tool/search/searxng.py +8 -2
  11. zrb/builtin/llm/tool/search/serpapi.py +8 -2
  12. zrb/builtin/llm/tool/sub_agent.py +4 -1
  13. zrb/builtin/llm/tool/web.py +5 -0
  14. zrb/builtin/llm/xcom_names.py +3 -0
  15. zrb/callback/callback.py +8 -1
  16. zrb/cmd/cmd_result.py +2 -1
  17. zrb/config/config.py +6 -2
  18. zrb/config/default_prompt/interactive_system_prompt.md +15 -12
  19. zrb/config/default_prompt/system_prompt.md +16 -18
  20. zrb/config/llm_rate_limitter.py +36 -13
  21. zrb/context/context.py +11 -0
  22. zrb/input/option_input.py +30 -2
  23. zrb/task/base/context.py +25 -13
  24. zrb/task/base/execution.py +52 -47
  25. zrb/task/base/lifecycle.py +1 -1
  26. zrb/task/base_task.py +31 -45
  27. zrb/task/base_trigger.py +0 -1
  28. zrb/task/cmd_task.py +3 -0
  29. zrb/task/llm/agent.py +39 -31
  30. zrb/task/llm/agent_runner.py +65 -3
  31. zrb/task/llm/default_workflow/researching/workflow.md +2 -0
  32. zrb/task/llm/history_list.py +13 -0
  33. zrb/task/llm/history_processor.py +4 -13
  34. zrb/task/llm/print_node.py +79 -25
  35. zrb/task/llm/prompt.py +70 -40
  36. zrb/task/llm/tool_wrapper.py +4 -1
  37. zrb/task/llm/workflow.py +54 -15
  38. zrb/task/llm_task.py +87 -33
  39. zrb/task/rsync_task.py +2 -0
  40. zrb/util/cmd/command.py +33 -10
  41. zrb/util/match.py +71 -0
  42. zrb/util/run.py +3 -3
  43. {zrb-1.21.31.dist-info → zrb-1.21.43.dist-info}/METADATA +1 -1
  44. {zrb-1.21.31.dist-info → zrb-1.21.43.dist-info}/RECORD +46 -43
  45. {zrb-1.21.31.dist-info → zrb-1.21.43.dist-info}/WHEEL +0 -0
  46. {zrb-1.21.31.dist-info → zrb-1.21.43.dist-info}/entry_points.txt +0 -0
zrb/task/llm/prompt.py CHANGED
@@ -10,7 +10,7 @@ from zrb.context.any_context import AnyContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
 from zrb.task.llm.workflow import LLMWorkflow, get_available_workflows
 from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
-from zrb.util.file import read_dir, read_file_with_line_numbers
+from zrb.util.file import read_dir, read_file, read_file_with_line_numbers
 from zrb.util.markdown import make_markdown_section
 
 if TYPE_CHECKING:
@@ -32,11 +32,8 @@ def get_system_and_user_prompt(
 ) -> tuple[str, str]:
     if conversation_history is None:
         conversation_history = ConversationHistory()
-    new_user_message_prompt, apendixes = _get_user_message_prompt(user_message)
     new_system_prompt = _construct_system_prompt(
         ctx=ctx,
-        user_message=user_message,
-        apendixes=apendixes,
         persona_attr=persona_attr,
         render_persona=render_persona,
         system_prompt_attr=system_prompt_attr,
@@ -47,13 +44,12 @@ def get_system_and_user_prompt(
         render_workflows=render_workflows,
         conversation_history=conversation_history,
     )
+    new_user_message_prompt = _get_user_message_prompt(user_message)
     return new_system_prompt, new_user_message_prompt
 
 
 def _construct_system_prompt(
     ctx: AnyContext,
-    user_message: str,
-    apendixes: str,
     persona_attr: StrAttr | None = None,
     render_persona: bool = False,
     system_prompt_attr: StrAttr | None = None,
@@ -71,6 +67,7 @@ def _construct_system_prompt(
     special_instruction_prompt = _get_special_instruction_prompt(
         ctx, special_instruction_prompt_attr, render_special_instruction_prompt
     )
+    project_instructions = _get_project_instructions()
     available_workflows = get_available_workflows()
     active_workflow_names = set(
         _get_active_workflow_names(ctx, workflows_attr, render_workflows)
@@ -98,6 +95,7 @@ def _construct_system_prompt(
                 ]
             ),
         ),
+        make_markdown_section("📜 PROJECT INSTRUCTIONS", project_instructions),
         make_markdown_section("🛠️ AVAILABLE WORKFLOWS", inactive_workflow_prompt),
         make_markdown_section(
             "📚 CONTEXT",
@@ -122,10 +120,6 @@ def _construct_system_prompt(
                     "📝 Contextual Note Content",
                     conversation_history.contextual_note,
                 ),
-                make_markdown_section(
-                    "📄 Apendixes",
-                    apendixes,
-                ),
             ]
         ),
     ),
@@ -133,21 +127,63 @@
     )
 
 
+def _get_project_instructions() -> str:
+    instructions = []
+    cwd = os.path.abspath(os.getcwd())
+    home = os.path.abspath(os.path.expanduser("~"))
+    search_dirs = []
+    if cwd == home or cwd.startswith(os.path.join(home, "")):
+        current_dir = cwd
+        while True:
+            search_dirs.append(current_dir)
+            if current_dir == home:
+                break
+            parent_dir = os.path.dirname(current_dir)
+            if parent_dir == current_dir:
+                break
+            current_dir = parent_dir
+    else:
+        search_dirs.append(cwd)
+    for file_name in ["AGENTS.md", "CLAUDE.md"]:
+        for dir_path in search_dirs:
+            abs_file_name = os.path.join(dir_path, file_name)
+            if os.path.isfile(abs_file_name):
+                content = read_file(abs_file_name)
+                instructions.append(
+                    make_markdown_section(
+                        f"Instruction from `{abs_file_name}`", content
+                    )
+                )
+                break
+    return "\n".join(instructions)
+
+
+def _get_prompt_attr(
+    ctx: AnyContext,
+    attr: StrAttr | None,
+    render: bool,
+    default: str | None,
+) -> str:
+    """Generic helper to get a prompt attribute, prioritizing task-specific then default."""
+    value = get_attr(
+        ctx,
+        attr,
+        None,
+        auto_render=render,
+    )
+    if value is not None:
+        return value
+    return default or ""
+
+
 def _get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
     render_persona: bool,
 ) -> str:
-    """Gets the persona, prioritizing task-specific, then default."""
-    persona = get_attr(
-        ctx,
-        persona_attr,
-        None,
-        auto_render=render_persona,
+    return _get_prompt_attr(
+        ctx, persona_attr, render_persona, llm_config.default_persona
     )
-    if persona is not None:
-        return persona
-    return llm_config.default_persona or ""
 
 
 def _get_base_system_prompt(
@@ -155,16 +191,9 @@ def _get_base_system_prompt(
     system_prompt_attr: StrAttr | None,
     render_system_prompt: bool,
 ) -> str:
-    """Gets the base system prompt, prioritizing task-specific, then default."""
-    system_prompt = get_attr(
-        ctx,
-        system_prompt_attr,
-        None,
-        auto_render=render_system_prompt,
+    return _get_prompt_attr(
+        ctx, system_prompt_attr, render_system_prompt, llm_config.default_system_prompt
     )
-    if system_prompt is not None:
-        return system_prompt
-    return llm_config.default_system_prompt or ""
 
 
 def _get_special_instruction_prompt(
@@ -172,16 +201,12 @@
     special_instruction_prompt_attr: StrAttr | None,
     render_spcecial_instruction_prompt: bool,
 ) -> str:
-    """Gets the special instruction prompt, prioritizing task-specific, then default."""
-    special_instruction = get_attr(
+    return _get_prompt_attr(
         ctx,
         special_instruction_prompt_attr,
-        None,
-        auto_render=render_spcecial_instruction_prompt,
+        render_spcecial_instruction_prompt,
+        llm_config.default_special_instruction_prompt,
     )
-    if special_instruction is not None:
-        return special_instruction
-    return llm_config.default_special_instruction_prompt
 
 
 def _get_active_workflow_names(
@@ -229,7 +254,8 @@ def _get_workflow_prompt(
     )
 
 
-def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
+def _get_user_message_prompt(user_message: str) -> str:
+    current_directory = os.getcwd()
     processed_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
     pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
@@ -247,19 +273,19 @@ def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
             ref_type = "directory"
         if content != "":
             # Replace the @-reference in the user message with the placeholder
-            placeholder = f"[Reference {i+1}: `{os.path.basename(ref)}`]"
+            rel_path = os.path.relpath(resource_path, current_directory)
+            placeholder = f"`{rel_path}`"
             processed_user_message = processed_user_message.replace(
                 f"@{ref}", placeholder, 1
            )
             apendix_list.append(
                 make_markdown_section(
-                    f"Content of {placeholder} ({ref_type} path: `{resource_path}`)",
+                    f"{placeholder} {ref_type}",
                     "\n".join(content) if isinstance(content, list) else content,
                     as_code=True,
                 )
             )
     apendixes = "\n".join(apendix_list)
-    current_directory = os.getcwd()
     iso_date = datetime.now(timezone.utc).astimezone().isoformat()
     modified_user_message = make_markdown_section(
         "User Request",
@@ -269,10 +295,14 @@ def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
                 f"- Current Time: {iso_date}",
                 "---",
                 processed_user_message,
+                make_markdown_section(
+                    "📄 Apendixes",
+                    apendixes,
+                ),
             ]
         ),
     )
-    return modified_user_message, apendixes
+    return modified_user_message
 
 
 def get_user_message(
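
The headline change in prompt.py: @-reference appendixes move out of the system prompt and into the user message, and a new `_get_project_instructions` helper injects `AGENTS.md`/`CLAUDE.md` files into a 📜 PROJECT INSTRUCTIONS section. When the working directory sits inside the user's home, every directory from cwd up to home is searched and the first hit per filename wins; otherwise only cwd is checked. A rough standalone sketch of that traversal (the function name here is illustrative, not part of zrb's API):

    import os

    def candidate_dirs(cwd: str, home: str) -> list[str]:
        # Walk upward from cwd to home (inclusive); each step is a search location.
        dirs, current = [], cwd
        while current != home and os.path.dirname(current) != current:
            dirs.append(current)
            current = os.path.dirname(current)
        dirs.append(current)
        return dirs

    # cwd=~/projects/app/src gives [src, app, projects, ~]: the closest
    # AGENTS.md (or CLAUDE.md) shadows any further up the tree.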
zrb/task/llm/tool_wrapper.py CHANGED
@@ -136,9 +136,12 @@ def _create_wrapper(
         result = await run_async(func(*args, **kwargs))
         _check_tool_call_result_limit(result)
         if has_ever_edited:
+            serializable_kwargs = kwargs.copy()
+            if any_context_param_name is not None:
+                serializable_kwargs.pop(any_context_param_name, None)
             return {
                 "tool_call_result": result,
-                "new_tool_parameters": kwargs,
+                "new_tool_parameters": serializable_kwargs,
                 "message": "User correction: Tool was called with user's parameters",
             }
         return result
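
The tool_wrapper.py tweak exists because the wrapper may inject a live AnyContext object into kwargs; echoing those kwargs back as "new_tool_parameters" would hand a non-serializable object to the model layer. The fix is the standard copy-and-pop pattern:

    # Minimal sketch; "ctx" stands in for whatever any_context_param_name holds.
    serializable_kwargs = kwargs.copy()
    serializable_kwargs.pop("ctx", None)  # no-op when the key is absent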
zrb/task/llm/workflow.py CHANGED
@@ -3,9 +3,13 @@ import os
 from zrb.config.config import CFG
 from zrb.config.llm_context.config import llm_context_config
 from zrb.config.llm_context.workflow import LLMWorkflow
+from zrb.context.any_context import AnyContext
+from zrb.xcom.xcom import Xcom
 
+LLM_LOADED_WORKFLOW_XCOM_NAME = "_llm_loaded_workflow_name"
 
-def load_workflow(workflow_name: str | list[str]) -> str:
+
+def load_workflow(ctx: AnyContext, workflow_name: str | list[str]) -> str:
     """
     Loads and formats one or more workflow documents for LLM consumption.
 
@@ -36,34 +40,56 @@ def load_workflow(workflow_name: str | list[str]) -> str:
             ]
         )
     )
+    llm_loaded_workflow_xcom = get_llm_loaded_workflow_xcom(ctx)
+    llm_loaded_workflow_xcom.push(names)
     return "\n".join(contents)
 
 
+def get_llm_loaded_workflow_xcom(ctx: AnyContext) -> Xcom:
+    if LLM_LOADED_WORKFLOW_XCOM_NAME not in ctx.xcom:
+        ctx.xcom[LLM_LOADED_WORKFLOW_XCOM_NAME] = Xcom([])
+    return ctx.xcom[LLM_LOADED_WORKFLOW_XCOM_NAME]
+
+
 def get_available_workflows() -> dict[str, LLMWorkflow]:
     available_workflows = {
         workflow_name.strip().lower(): workflow
         for workflow_name, workflow in llm_context_config.get_workflows().items()
     }
-    # Define builtin workflow locations in order of precedence
-    builtin_workflow_locations = [
-        os.path.expanduser(additional_builtin_workflow_path)
-        for additional_builtin_workflow_path in CFG.LLM_BUILTIN_WORKFLOW_PATHS
-        if os.path.isdir(os.path.expanduser(additional_builtin_workflow_path))
-    ]
-    builtin_workflow_locations.append(
-        os.path.join(os.path.dirname(__file__), "default_workflow")
+    workflow_hidden_folder = f".{CFG.ROOT_GROUP_NAME}"
+    # Define workflow locations in order of precedence
+    default_workflow_locations = (
+        [
+            # Project specific + user specific workflows
+            os.path.join(
+                os.path.dirname(__file__), workflow_hidden_folder, "workflows"
+            ),
+            os.path.join(os.path.dirname(__file__), workflow_hidden_folder, "skills"),
+            os.path.join(os.path.dirname(__file__), ".claude", "skills"),
+            os.path.join(os.path.expanduser("~"), workflow_hidden_folder, "workflows"),
+            os.path.join(os.path.expanduser("~"), workflow_hidden_folder, "skills"),
+            os.path.join(os.path.expanduser("~"), ".claude", "skills"),
+        ]
+        + [
+            # User defined builtin workflows
+            os.path.expanduser(additional_builtin_workflow_path)
+            for additional_builtin_workflow_path in CFG.LLM_BUILTIN_WORKFLOW_PATHS
+            if os.path.isdir(os.path.expanduser(additional_builtin_workflow_path))
+        ]
+        + [
+            # Zrb builtin workflows
+            os.path.join(os.path.dirname(__file__), "default_workflow"),
+        ]
     )
     # Load workflows from all locations
-    for workflow_location in builtin_workflow_locations:
+    for workflow_location in default_workflow_locations:
         if not os.path.isdir(workflow_location):
             continue
         for workflow_name in os.listdir(workflow_location):
             workflow_dir = os.path.join(workflow_location, workflow_name)
-            workflow_file = os.path.join(workflow_dir, "workflow.md")
-            if not os.path.isfile(workflow_file):
-                workflow_file = os.path.join(workflow_dir, "SKILL.md")
-            if not os.path.isfile(path=workflow_file):
-                continue
+            workflow_file = _get_workflow_file_name(workflow_dir)
+            if not workflow_file:
+                continue
             # Only add if not already defined (earlier locations have precedence)
             if workflow_name not in available_workflows:
                 with open(workflow_file, "r") as f:
@@ -74,3 +100,16 @@ def get_available_workflows() -> dict[str, LLMWorkflow]:
                     content=workflow_content,
                 )
     return available_workflows
+
+
+def _get_workflow_file_name(workflow_dir: str) -> str | None:
+    workflow_file = os.path.join(workflow_dir, "workflow.md")
+    if os.path.isfile(workflow_file):
+        return workflow_file
+    workflow_file = os.path.join(workflow_dir, "WORKFLOW.md")
+    if os.path.isfile(workflow_file):
+        return workflow_file
+    workflow_file = os.path.join(workflow_dir, "SKILL.md")
+    if os.path.isfile(workflow_file):
+        return workflow_file
+    return None
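
Workflow discovery now checks project- and user-level hidden folders (`.{ROOT_GROUP_NAME}`, presumably `.zrb` by default, plus `.claude/skills` for Claude-style skills) before the user-configured CFG.LLM_BUILTIN_WORKFLOW_PATHS and the bundled default_workflow directory, and every workflow loaded via `load_workflow` is now recorded in an Xcom queue so other components can see what was pulled in. File-name resolution is a first-match scan; a minimal equivalent of `_get_workflow_file_name`:

    import os

    def resolve_workflow_file(workflow_dir: str) -> str | None:
        # First existing candidate wins; the tuple order encodes precedence.
        for name in ("workflow.md", "WORKFLOW.md", "SKILL.md"):
            path = os.path.join(workflow_dir, name)
            if os.path.isfile(path):
                return path
        return None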
zrb/task/llm_task.py CHANGED
@@ -22,6 +22,7 @@ from zrb.task.llm.conversation_history import (
     write_conversation_history,
 )
 from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.history_list import remove_system_prompt_and_instruction
 from zrb.task.llm.history_summarization import get_history_summarization_token_threshold
 from zrb.task.llm.prompt import (
     get_attachments,
@@ -81,7 +82,7 @@ class LLMTask(BaseTask):
         render_system_prompt: bool = False,
         special_instruction_prompt: "Callable[[AnyContext], str | None] | str | None" = None,
         render_special_instruction_prompt: bool = False,
-        workflows: StrListAttr | None = None,
+        workflows: "Callable[[AnyContext], list[str] | None] | list[str] | None" = None,
         render_workflows: bool = True,
         message: StrAttr | None = None,
         attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None,  # noqa
@@ -134,9 +135,6 @@
         upstream: list[AnyTask] | AnyTask | None = None,
         fallback: list[AnyTask] | AnyTask | None = None,
         successor: list[AnyTask] | AnyTask | None = None,
-        conversation_context: (
-            dict[str, Any] | Callable[[AnyContext], dict[str, Any]] | None
-        ) = None,
     ):
         super().__init__(
             name=name,
@@ -204,7 +202,6 @@
             render_history_summarization_token_threshold
         )
         self._max_call_iteration = max_call_iteration
-        self._conversation_context = conversation_context
         self._yolo_mode = yolo_mode
         self._render_yolo_mode = render_yolo_mode
         self._attachment = attachment
@@ -238,7 +235,58 @@
         self._yolo_mode = yolo_mode
 
     async def _exec_action(self, ctx: AnyContext) -> Any:
-        # Get dependent configurations first
+        # 1. Get dependent configurations
+        (
+            model_settings,
+            model,
+            yolo_mode,
+            summarization_prompt,
+            user_message,
+            attachments,
+        ) = self._get_llm_config(ctx)
+
+        # 2. Prepare initial state (read history from previous session)
+        conversation_history = await read_conversation_history(
+            ctx=ctx,
+            conversation_history_reader=self._conversation_history_reader,
+            conversation_history_file_attr=self._conversation_history_file,
+            render_history_file=self._render_history_file,
+            conversation_history_attr=self._conversation_history,
+        )
+        inject_conversation_history_notes(conversation_history)
+        inject_subagent_conversation_history_into_ctx(ctx, conversation_history)
+
+        # 3. Get system prompt and user prompt
+        system_prompt, user_prompt = self._get_prompts(
+            ctx, user_message, conversation_history
+        )
+
+        # 4. Get the agent instance
+        ctx.log_info(f"SYSTEM PROMPT:\n{system_prompt}")
+        ctx.log_info(f"USER PROMPT:\n{user_prompt}")
+        agent = self._create_agent(
+            ctx,
+            model,
+            system_prompt,
+            model_settings,
+            yolo_mode,
+            summarization_prompt,
+        )
+
+        # 5. Run the agent iteration
+        result = await self._execute_agent(
+            ctx=ctx,
+            agent=agent,
+            user_prompt=user_prompt,
+            attachments=attachments,
+            conversation_history=conversation_history,
+        )
+
+        # 6. Save history and usage
+        await self._save_history_and_usage(ctx, conversation_history)
+        return result
+
+    def _get_llm_config(self, ctx: AnyContext):
         model_settings = get_model_settings(ctx, self._model_settings)
         model = get_model(
             ctx=ctx,
@@ -261,18 +309,22 @@
         )
         user_message = get_user_message(ctx, self._message, self._render_message)
         attachments = get_attachments(ctx, self._attachment)
-        # 1. Prepare initial state (read history from previous session)
-        conversation_history = await read_conversation_history(
-            ctx=ctx,
-            conversation_history_reader=self._conversation_history_reader,
-            conversation_history_file_attr=self._conversation_history_file,
-            render_history_file=self._render_history_file,
-            conversation_history_attr=self._conversation_history,
+        return (
+            model_settings,
+            model,
+            yolo_mode,
+            summarization_prompt,
+            user_message,
+            attachments,
         )
-        inject_conversation_history_notes(conversation_history)
-        inject_subagent_conversation_history_into_ctx(ctx, conversation_history)
-        # 2. Get system prompt and user prompt
-        system_prompt, user_prompt = get_system_and_user_prompt(
+
+    def _get_prompts(
+        self,
+        ctx: AnyContext,
+        user_message: str,
+        conversation_history: ConversationHistory,
+    ):
+        return get_system_and_user_prompt(
             ctx=ctx,
             user_message=user_message,
             persona_attr=self._persona,
@@ -285,7 +337,16 @@
             render_workflows=self._render_workflows,
             conversation_history=conversation_history,
         )
-        # 3. Summarization
+
+    def _create_agent(
+        self,
+        ctx: AnyContext,
+        model,
+        system_prompt,
+        model_settings,
+        yolo_mode,
+        summarization_prompt,
+    ):
         small_model = get_model(
             ctx=ctx,
             model_attr=self._small_model,
@@ -301,10 +362,7 @@
             self._history_summarization_token_threshold,
             self._render_history_summarization_token_threshold,
         )
-        # 4. Get the agent instance
-        ctx.log_info(f"SYSTEM PROMPT:\n{system_prompt}")
-        ctx.log_info(f"USER PROMPT:\n{user_prompt}")
-        agent = get_agent(
+        return get_agent(
             ctx=ctx,
             model=model,
             rate_limitter=self._rate_limitter,
@@ -322,15 +380,10 @@
             summarization_token_threshold=summarization_token_threshold,
             history_processors=[],  # TODO: make this a property
         )
-        # 5. Run the agent iteration and save the results/history
-        result = await self._execute_agent(
-            ctx=ctx,
-            agent=agent,
-            user_prompt=user_prompt,
-            attachments=attachments,
-            conversation_history=conversation_history,
-        )
-        # 6. Write conversation history
+
+    async def _save_history_and_usage(
+        self, ctx: AnyContext, conversation_history: ConversationHistory
+    ):
         conversation_history.subagent_history = (
             extract_subagent_conversation_history_from_ctx(ctx)
         )
@@ -341,7 +394,6 @@
             conversation_history_file_attr=self._conversation_history_file,
             render_history_file=self._render_history_file,
         )
-        return result
 
     async def _execute_agent(
         self,
@@ -362,7 +414,9 @@
             rate_limitter=self._rate_limitter,
         )
         if agent_run and agent_run.result:
-            new_history_list = json.loads(agent_run.result.all_messages_json())
+            new_history_list = remove_system_prompt_and_instruction(
+                json.loads(agent_run.result.all_messages_json())
+            )
             conversation_history.history = new_history_list
             xcom_usage_key = f"{self.name}-usage"
             if xcom_usage_key not in ctx.xcom:
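
llm_task.py is mostly a mechanical refactor: the long `_exec_action` becomes a six-step pipeline over `_get_llm_config`, `_get_prompts`, `_create_agent`, `_execute_agent`, and `_save_history_and_usage`, and the unused `conversation_context` parameter is dropped. The one behavioral change is that persisted history now passes through `remove_system_prompt_and_instruction`, so the system prompt (rebuilt on every run anyway) is not stored with each turn. That function's body is not in this diff; a plausible shape, assuming pydantic-ai style serialized messages whose parts carry a "part_kind" discriminator:

    def strip_system_parts(history: list[dict]) -> list[dict]:
        # Hypothetical sketch, not zrb's actual implementation.
        cleaned = []
        for message in history:
            parts = [
                part
                for part in message.get("parts", [])
                if part.get("part_kind") != "system-prompt"
            ]
            if parts:
                cleaned.append({**message, "parts": parts})
        return cleaned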
zrb/task/rsync_task.py CHANGED
@@ -44,6 +44,7 @@ class RsyncTask(CmdTask):
         plain_print: bool = False,
         max_output_line: int = 1000,
         max_error_line: int = 1000,
+        execution_timeout: int = 3600,
         execute_condition: BoolAttr = True,
         retries: int = 2,
         retry_period: float = 0,
@@ -77,6 +78,7 @@
             plain_print=plain_print,
             max_output_line=max_output_line,
             max_error_line=max_error_line,
+            execution_timeout=execution_timeout,
             execute_condition=execute_condition,
             retries=retries,
             retry_period=retry_period,
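
RsyncTask simply plumbs a new `execution_timeout` (default 3600 seconds) through to CmdTask, which presumably gains the matching parameter in this release (cmd_task.py: +3 lines) and ultimately feeds run_command's new timeout below, so a wedged rsync is killed rather than hanging a pipeline forever. Usage is just another keyword argument (other constructor parameters elided):

    # Sketch; only parameters visible in this diff are shown.
    sync = RsyncTask(name="sync-assets", execution_timeout=600)  # abort after 10 min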
zrb/util/cmd/command.py CHANGED
@@ -5,7 +5,7 @@ import signal
 import sys
 from collections import deque
 from collections.abc import Callable
-from typing import TextIO
+from typing import Any, TextIO
 
 import psutil
 
@@ -62,6 +62,8 @@ async def run_command(
     register_pid_method: Callable[[int], None] | None = None,
     max_output_line: int = 1000,
     max_error_line: int = 1000,
+    max_display_line: int | None = None,
+    timeout: int = 3600,
     is_interactive: bool = False,
 ) -> tuple[CmdResult, int]:
     """
@@ -77,6 +79,8 @@
     actual_print_method = print_method if print_method is not None else print
     if cwd is None:
         cwd = os.getcwd()
+    if max_display_line is None:
+        max_display_line = max(max_output_line, max_error_line)
     # While environment variables alone weren't the fix, they are still
     # good practice for encouraging simpler output from tools.
     child_env = (env_map or os.environ).copy()
@@ -95,17 +99,33 @@
     if register_pid_method is not None:
         register_pid_method(cmd_process.pid)
     # Use the new, simple, and correct stream reader.
+    display_lines = deque(maxlen=max_display_line if max_display_line > 0 else 0)
     stdout_task = asyncio.create_task(
-        __read_stream(cmd_process.stdout, actual_print_method, max_output_line)
+        __read_stream(
+            cmd_process.stdout, actual_print_method, max_output_line, display_lines
+        )
     )
     stderr_task = asyncio.create_task(
-        __read_stream(cmd_process.stderr, actual_print_method, max_error_line)
+        __read_stream(
+            cmd_process.stderr, actual_print_method, max_error_line, display_lines
+        )
+    )
+    timeout_task = (
+        asyncio.create_task(asyncio.sleep(timeout)) if timeout and timeout > 0 else None
     )
     try:
-        return_code = await cmd_process.wait()
+        wait_task = asyncio.create_task(cmd_process.wait())
+        done, pending = await asyncio.wait(
+            {wait_task, timeout_task} if timeout_task else {wait_task},
+            return_when=asyncio.FIRST_COMPLETED,
+        )
+        if timeout_task and timeout_task in done:
+            raise asyncio.TimeoutError()
+        return_code = wait_task.result()
         stdout, stderr = await asyncio.gather(stdout_task, stderr_task)
-        return CmdResult(stdout, stderr), return_code
-    except (KeyboardInterrupt, asyncio.CancelledError):
+        display = "\r\n".join(display_lines)
+        return CmdResult(stdout, stderr, display=display), return_code
+    except (KeyboardInterrupt, asyncio.CancelledError, asyncio.TimeoutError):
         try:
             os.killpg(cmd_process.pid, signal.SIGINT)
             await asyncio.wait_for(cmd_process.wait(), timeout=2.0)
@@ -133,13 +153,14 @@ def __get_cmd_stdin(is_interactive: bool) -> int | TextIO:
 async def __read_stream(
     stream: asyncio.StreamReader,
     print_method: Callable[..., None],
-    max_lines: int,
+    max_line: int,
+    display_queue: deque[Any],
 ) -> str:
     """
     Reads from the stream using the robust `readline()` and correctly
     interprets carriage returns (`\r`) as distinct print events.
     """
-    captured_lines = deque(maxlen=max_lines if max_lines > 0 else 0)
+    captured_lines = deque(maxlen=max_line if max_line > 0 else 0)
     while True:
         try:
             line_bytes = await stream.readline()
@@ -149,8 +170,9 @@
             # Safety valve for the memory limit.
             error_msg = "[ERROR] A single line of output was too long to process."
             print_method(error_msg)
-            if max_lines > 0:
+            if max_line > 0:
                 captured_lines.append(error_msg)
+            display_queue.append(error_msg)
             break
         except (KeyboardInterrupt, asyncio.CancelledError):
             raise
@@ -165,8 +187,9 @@
                 print_method(clean_part, end="\r\n")
             except Exception:
                 print_method(clean_part)
-            if max_lines > 0:
+            if max_line > 0:
                 captured_lines.append(clean_part)
+            display_queue.append(clean_part)
     return "\r\n".join(captured_lines)
 
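
Two things land in command.py: a shared display_lines deque that interleaves stdout and stderr into a single CmdResult display field, and a timeout implemented as a race rather than asyncio.wait_for. The process-wait task and an asyncio.sleep(timeout) sleeper are awaited with FIRST_COMPLETED; if the sleeper wins, a TimeoutError is raised into the existing cleanup branch that SIGINTs the whole process group. The same pattern in isolation, as a self-contained sketch:

    import asyncio

    async def wait_with_deadline(coro, timeout: float):
        # Race real work against a sleeper; whichever finishes first decides.
        work = asyncio.create_task(coro)
        sleeper = asyncio.create_task(asyncio.sleep(timeout))
        done, _ = await asyncio.wait(
            {work, sleeper}, return_when=asyncio.FIRST_COMPLETED
        )
        if sleeper in done:
            work.cancel()  # run_command instead signals the child process group
            raise asyncio.TimeoutError()
        sleeper.cancel()
        return work.result()

Inlining the race (instead of reaching for asyncio.wait_for) lets run_command reuse one except-branch for Ctrl-C, task cancellation, and timeout alike.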