zrb 1.13.1__py3-none-any.whl → 1.21.33__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. zrb/__init__.py +2 -6
  2. zrb/attr/type.py +10 -7
  3. zrb/builtin/__init__.py +2 -0
  4. zrb/builtin/git.py +12 -1
  5. zrb/builtin/group.py +31 -15
  6. zrb/builtin/http.py +7 -8
  7. zrb/builtin/llm/attachment.py +40 -0
  8. zrb/builtin/llm/chat_completion.py +287 -0
  9. zrb/builtin/llm/chat_session.py +130 -144
  10. zrb/builtin/llm/chat_session_cmd.py +288 -0
  11. zrb/builtin/llm/chat_trigger.py +78 -0
  12. zrb/builtin/llm/history.py +4 -4
  13. zrb/builtin/llm/llm_ask.py +218 -110
  14. zrb/builtin/llm/tool/api.py +74 -62
  15. zrb/builtin/llm/tool/cli.py +56 -21
  16. zrb/builtin/llm/tool/code.py +57 -47
  17. zrb/builtin/llm/tool/file.py +292 -255
  18. zrb/builtin/llm/tool/note.py +84 -0
  19. zrb/builtin/llm/tool/rag.py +25 -18
  20. zrb/builtin/llm/tool/search/__init__.py +1 -0
  21. zrb/builtin/llm/tool/search/brave.py +66 -0
  22. zrb/builtin/llm/tool/search/searxng.py +61 -0
  23. zrb/builtin/llm/tool/search/serpapi.py +61 -0
  24. zrb/builtin/llm/tool/sub_agent.py +53 -26
  25. zrb/builtin/llm/tool/web.py +94 -157
  26. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  27. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  28. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  29. zrb/builtin/searxng/config/settings.yml +5671 -0
  30. zrb/builtin/searxng/start.py +21 -0
  31. zrb/builtin/setup/latex/ubuntu.py +1 -0
  32. zrb/builtin/setup/ubuntu.py +1 -1
  33. zrb/builtin/shell/autocomplete/bash.py +4 -3
  34. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  35. zrb/config/config.py +297 -79
  36. zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
  37. zrb/config/default_prompt/interactive_system_prompt.md +25 -28
  38. zrb/config/default_prompt/persona.md +1 -1
  39. zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
  40. zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
  41. zrb/config/default_prompt/summarization_prompt.md +57 -16
  42. zrb/config/default_prompt/system_prompt.md +29 -25
  43. zrb/config/llm_config.py +129 -24
  44. zrb/config/llm_context/config.py +127 -90
  45. zrb/config/llm_context/config_parser.py +1 -7
  46. zrb/config/llm_context/workflow.py +81 -0
  47. zrb/config/llm_rate_limitter.py +100 -47
  48. zrb/context/any_shared_context.py +7 -1
  49. zrb/context/context.py +8 -2
  50. zrb/context/shared_context.py +6 -8
  51. zrb/group/any_group.py +12 -5
  52. zrb/group/group.py +67 -3
  53. zrb/input/any_input.py +5 -1
  54. zrb/input/base_input.py +18 -6
  55. zrb/input/option_input.py +13 -1
  56. zrb/input/text_input.py +7 -24
  57. zrb/runner/cli.py +21 -20
  58. zrb/runner/common_util.py +24 -19
  59. zrb/runner/web_route/task_input_api_route.py +5 -5
  60. zrb/runner/web_route/task_session_api_route.py +1 -4
  61. zrb/runner/web_util/user.py +7 -3
  62. zrb/session/any_session.py +12 -6
  63. zrb/session/session.py +39 -18
  64. zrb/task/any_task.py +24 -3
  65. zrb/task/base/context.py +17 -9
  66. zrb/task/base/execution.py +15 -8
  67. zrb/task/base/lifecycle.py +8 -4
  68. zrb/task/base/monitoring.py +12 -7
  69. zrb/task/base_task.py +69 -5
  70. zrb/task/base_trigger.py +12 -5
  71. zrb/task/llm/agent.py +130 -145
  72. zrb/task/llm/agent_runner.py +152 -0
  73. zrb/task/llm/config.py +45 -13
  74. zrb/task/llm/conversation_history.py +110 -29
  75. zrb/task/llm/conversation_history_model.py +4 -179
  76. zrb/task/llm/default_workflow/coding/workflow.md +41 -0
  77. zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
  78. zrb/task/llm/default_workflow/git/workflow.md +118 -0
  79. zrb/task/llm/default_workflow/golang/workflow.md +128 -0
  80. zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
  81. zrb/task/llm/default_workflow/java/workflow.md +146 -0
  82. zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
  83. zrb/task/llm/default_workflow/python/workflow.md +160 -0
  84. zrb/task/llm/default_workflow/researching/workflow.md +153 -0
  85. zrb/task/llm/default_workflow/rust/workflow.md +162 -0
  86. zrb/task/llm/default_workflow/shell/workflow.md +299 -0
  87. zrb/task/llm/file_replacement.py +206 -0
  88. zrb/task/llm/file_tool_model.py +57 -0
  89. zrb/task/llm/history_processor.py +206 -0
  90. zrb/task/llm/history_summarization.py +2 -192
  91. zrb/task/llm/print_node.py +192 -64
  92. zrb/task/llm/prompt.py +198 -153
  93. zrb/task/llm/subagent_conversation_history.py +41 -0
  94. zrb/task/llm/tool_confirmation_completer.py +41 -0
  95. zrb/task/llm/tool_wrapper.py +216 -55
  96. zrb/task/llm/workflow.py +76 -0
  97. zrb/task/llm_task.py +122 -70
  98. zrb/task/make_task.py +2 -3
  99. zrb/task/rsync_task.py +25 -10
  100. zrb/task/scheduler.py +4 -4
  101. zrb/util/attr.py +54 -39
  102. zrb/util/cli/markdown.py +12 -0
  103. zrb/util/cli/text.py +30 -0
  104. zrb/util/file.py +27 -11
  105. zrb/util/git.py +2 -2
  106. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  107. zrb/util/string/conversion.py +1 -1
  108. zrb/util/truncate.py +23 -0
  109. zrb/util/yaml.py +204 -0
  110. zrb/xcom/xcom.py +10 -0
  111. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/METADATA +40 -20
  112. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/RECORD +114 -83
  113. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/WHEEL +1 -1
  114. zrb/task/llm/default_workflow/coding.md +0 -24
  115. zrb/task/llm/default_workflow/copywriting.md +0 -17
  116. zrb/task/llm/default_workflow/researching.md +0 -18
  117. {zrb-1.13.1.dist-info → zrb-1.21.33.dist-info}/entry_points.txt +0 -0
zrb/task/llm/prompt.py CHANGED
@@ -2,18 +2,138 @@ import os
 import platform
 import re
 from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Callable
 
 from zrb.attr.type import StrAttr, StrListAttr
-from zrb.config.llm_config import llm_config as llm_config
-from zrb.config.llm_context.config import llm_context_config
+from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.workflow import LLMWorkflow, get_available_workflows
 from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
 from zrb.util.file import read_dir, read_file_with_line_numbers
-from zrb.util.llm.prompt import make_prompt_section
+from zrb.util.markdown import make_markdown_section
 
+if TYPE_CHECKING:
+    from pydantic_ai.messages import UserContent
 
-def get_persona(
+
+def get_system_and_user_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> tuple[str, str]:
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    new_user_message_prompt, apendixes = _get_user_message_prompt(user_message)
+    new_system_prompt = _construct_system_prompt(
+        ctx=ctx,
+        user_message=user_message,
+        apendixes=apendixes,
+        persona_attr=persona_attr,
+        render_persona=render_persona,
+        system_prompt_attr=system_prompt_attr,
+        render_system_prompt=render_system_prompt,
+        special_instruction_prompt_attr=special_instruction_prompt_attr,
+        render_special_instruction_prompt=render_special_instruction_prompt,
+        workflows_attr=workflows_attr,
+        render_workflows=render_workflows,
+        conversation_history=conversation_history,
+    )
+    return new_system_prompt, new_user_message_prompt
+
+
+def _construct_system_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    apendixes: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> str:
+    persona = _get_persona(ctx, persona_attr, render_persona)
+    base_system_prompt = _get_base_system_prompt(
+        ctx, system_prompt_attr, render_system_prompt
+    )
+    special_instruction_prompt = _get_special_instruction_prompt(
+        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
+    )
+    available_workflows = get_available_workflows()
+    active_workflow_names = set(
+        _get_active_workflow_names(ctx, workflows_attr, render_workflows)
+    )
+    active_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, True
+    )
+    inactive_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, False
+    )
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    current_directory = os.getcwd()
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    return "\n".join(
+        [
+            persona,
+            base_system_prompt,
+            make_markdown_section(
+                "📝 SPECIAL INSTRUCTION",
+                "\n".join(
+                    [
+                        special_instruction_prompt,
+                        active_workflow_prompt,
+                    ]
+                ),
+            ),
+            make_markdown_section("🛠️ AVAILABLE WORKFLOWS", inactive_workflow_prompt),
+            make_markdown_section(
+                "📚 CONTEXT",
+                "\n".join(
+                    [
+                        make_markdown_section(
+                            "ℹ️ System Information",
+                            "\n".join(
+                                [
+                                    f"- OS: {platform.system()} {platform.version()}",
+                                    f"- Python Version: {platform.python_version()}",
+                                    f"- Current Directory: {current_directory}",
+                                    f"- Current Time: {iso_date}",
+                                ]
+                            ),
+                        ),
+                        make_markdown_section(
+                            "🧠 Long Term Note Content",
+                            conversation_history.long_term_note,
+                        ),
+                        make_markdown_section(
+                            "📝 Contextual Note Content",
+                            conversation_history.contextual_note,
+                        ),
+                        make_markdown_section(
+                            "📄 Apendixes",
+                            apendixes,
+                        ),
+                    ]
+                ),
+            ),
+        ]
+    )
+
+
+def _get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
     render_persona: bool,
@@ -30,7 +150,7 @@ def get_persona(
     return llm_config.default_persona or ""
 
 
-def get_base_system_prompt(
+def _get_base_system_prompt(
     ctx: AnyContext,
     system_prompt_attr: StrAttr | None,
     render_system_prompt: bool,
@@ -47,7 +167,7 @@ def get_base_system_prompt(
     return llm_config.default_system_prompt or ""
 
 
-def get_special_instruction_prompt(
+def _get_special_instruction_prompt(
    ctx: AnyContext,
    special_instruction_prompt_attr: StrAttr | None,
    render_spcecial_instruction_prompt: bool,
@@ -64,138 +184,57 @@ def get_special_instruction_prompt(
     return llm_config.default_special_instruction_prompt
 
 
-def get_modes(
+def _get_active_workflow_names(
     ctx: AnyContext,
-    modes_attr: StrAttr | None,
-    render_modes: bool,
-) -> str:
-    """Gets the modes, prioritizing task-specific, then default."""
-    raw_modes = get_str_list_attr(
+    workflows_attr: StrListAttr | None,
+    render_workflows: bool,
+) -> list[str]:
+    """Gets the workflows, prioritizing task-specific, then default."""
+    raw_workflows = get_str_list_attr(
         ctx,
-        modes_attr,
-        auto_render=render_modes,
+        [] if workflows_attr is None else workflows_attr,
+        auto_render=render_workflows,
     )
-    if raw_modes is None:
-        raw_modes = []
-    modes = [mode.strip() for mode in raw_modes if mode.strip() != ""]
-    if len(modes) > 0:
-        return modes
-    return llm_config.default_modes or []
+    if raw_workflows is not None and len(raw_workflows) > 0:
+        return [w.strip().lower() for w in raw_workflows if w.strip() != ""]
+    return []
 
 
-def get_workflow_prompt(
-    ctx: AnyContext,
-    modes_attr: StrAttr | None,
-    render_modes: bool,
+def _get_workflow_prompt(
+    available_workflows: dict[str, LLMWorkflow],
+    active_workflow_names: list[str] | set[str],
+    select_active_workflow: bool,
 ) -> str:
-    modes = get_modes(ctx, modes_attr, render_modes)
-    # Get user-defined workflows
-    workflows = {
-        workflow_name: content
-        for workflow_name, content in llm_context_config.get_workflows().items()
-        if workflow_name in modes
+    selected_workflows = {
+        workflow_name: available_workflows[workflow_name]
+        for workflow_name in available_workflows
+        if (workflow_name in active_workflow_names) == select_active_workflow
     }
-    # Get requested builtin-workflow names
-    requested_builtin_workflow_names = [
-        workflow_name
-        for workflow_name in ("coding", "copywriting", "researching")
-        if workflow_name in modes and workflow_name not in workflows
-    ]
-    # add builtin-workflows if requested
-    if len(requested_builtin_workflow_names) > 0:
-        dir_path = os.path.dirname(__file__)
-        for workflow_name in requested_builtin_workflow_names:
-            workflow_file_path = os.path.join(
-                dir_path, "default_workflow", f"{workflow_name}.md"
-            )
-            with open(workflow_file_path, "r") as f:
-                workflows[workflow_name] = f.read()
     return "\n".join(
         [
-            make_prompt_section(header.capitalize(), content)
-            for header, content in workflows.items()
-            if header.lower() in modes
-        ]
-    )
-
-
-def get_system_and_user_prompt(
-    ctx: AnyContext,
-    user_message: str,
-    persona_attr: StrAttr | None = None,
-    render_persona: bool = False,
-    system_prompt_attr: StrAttr | None = None,
-    render_system_prompt: bool = False,
-    special_instruction_prompt_attr: StrAttr | None = None,
-    render_special_instruction_prompt: bool = False,
-    modes_attr: StrListAttr | None = None,
-    render_modes: bool = False,
-    conversation_history: ConversationHistory | None = None,
-) -> tuple[str, str]:
-    """Combines persona, base system prompt, and special instructions."""
-    persona = get_persona(ctx, persona_attr, render_persona)
-    base_system_prompt = get_base_system_prompt(
-        ctx, system_prompt_attr, render_system_prompt
-    )
-    special_instruction_prompt = get_special_instruction_prompt(
-        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
-    )
-    workflow_prompt = get_workflow_prompt(ctx, modes_attr, render_modes)
-    if conversation_history is None:
-        conversation_history = ConversationHistory()
-    conversation_context, new_user_message = extract_conversation_context(user_message)
-    new_system_prompt = "\n".join(
-        [
-            make_prompt_section("Persona", persona),
-            make_prompt_section("System Prompt", base_system_prompt),
-            make_prompt_section("Special Instruction", special_instruction_prompt),
-            make_prompt_section("Special Workflows", workflow_prompt),
-            make_prompt_section(
-                "Past Conversation",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Summary",
-                            conversation_history.past_conversation_summary,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Last Transcript",
-                            conversation_history.past_conversation_transcript,
-                            as_code=True,
-                        ),
-                    ]
+            make_markdown_section(
+                workflow_name.capitalize(),
+                (
+                    (
+                        "> Workflow status: Automatically Loaded/Activated.\n"
+                        f"> Workflow location: `{workflow.path}`\n"
+                        "{workflow.content}"
+                    )
+                    if select_active_workflow
+                    else f"Workflow name: {workflow_name}\n{workflow.description}"
                 ),
-            ),
-            make_prompt_section(
-                "Notes",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Long Term",
-                            conversation_history.long_term_note,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Contextual",
-                            conversation_history.contextual_note,
-                            as_code=True,
-                        ),
-                    ]
-                ),
-            ),
-            make_prompt_section("Conversation Context", conversation_context),
+            )
+            for workflow_name, workflow in selected_workflows.items()
         ]
     )
-    return new_system_prompt, new_user_message
 
 
-def extract_conversation_context(user_message: str) -> tuple[str, str]:
-    modified_user_message = user_message
+def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
+    processed_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
-    pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\s]+)"
+    pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
     potential_resource_path = re.findall(pattern, user_message)
-    apendixes = []
+    apendix_list = []
     for i, ref in enumerate(potential_resource_path):
         resource_path = os.path.abspath(os.path.expanduser(ref))
         content = ""
@@ -208,44 +247,32 @@ def extract_conversation_context(user_message: str) -> tuple[str, str]:
             ref_type = "directory"
         if content != "":
             # Replace the @-reference in the user message with the placeholder
-            placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
-            modified_user_message = modified_user_message.replace(
+            placeholder = f"[Reference {i+1}: `{os.path.basename(ref)}`]"
+            processed_user_message = processed_user_message.replace(
                 f"@{ref}", placeholder, 1
             )
-            apendixes.append(
-                make_prompt_section(
-                    f"{placeholder} ({ref_type} path: `{resource_path}`)",
-                    content,
+            apendix_list.append(
+                make_markdown_section(
+                    f"Content of {placeholder} ({ref_type} path: `{resource_path}`)",
+                    "\n".join(content) if isinstance(content, list) else content,
                     as_code=True,
                 )
             )
-    conversation_context = "\n".join(
-        [
-            make_prompt_section("Current OS", platform.system()),
-            make_prompt_section("OS Version", platform.version()),
-            make_prompt_section("Python Version", platform.python_version()),
-        ]
-    )
-    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    apendixes = "\n".join(apendix_list)
     current_directory = os.getcwd()
-    modified_user_message = "\n".join(
-        [
-            make_prompt_section("User Message", modified_user_message),
-            make_prompt_section(
-                "Context",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Current working directory", current_directory
-                        ),
-                        make_prompt_section("Current time", iso_date),
-                        make_prompt_section("Apendixes", "\n".join(apendixes)),
-                    ]
-                ),
-            ),
-        ]
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    modified_user_message = make_markdown_section(
+        "User Request",
+        "\n".join(
+            [
+                f"- Current Directory: {current_directory}",
+                f"- Current Time: {iso_date}",
+                "---",
+                processed_user_message,
+            ]
+        ),
     )
-    return conversation_context, modified_user_message
+    return modified_user_message, apendixes
 
 
 def get_user_message(
@@ -274,3 +301,21 @@ def get_summarization_system_prompt(
     if summarization_prompt is not None:
         return summarization_prompt
     return llm_config.default_summarization_prompt
+
+
+def get_attachments(
+    ctx: AnyContext,
+    attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None,  # noqa
+) -> "list[UserContent]":
+    if attachment is None:
+        return []
+    if callable(attachment):
+        result = attachment(ctx)
+        if result is None:
+            return []
+        if isinstance(result, list):
+            return result
+        return [result]
+    if isinstance(attachment, list):
+        return attachment
+    return [attachment]
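
The new `get_attachments` helper above normalizes whatever an LLM task is configured with into a flat `list[UserContent]`. A minimal sketch of that behavior (not part of the diff): plain strings are valid `UserContent`, and `None` stands in for the `AnyContext`, which the helper only forwards to callable attachments.

```python
# Hedged sketch: demonstrating get_attachments' normalization only.
# A real AnyContext is supplied by zrb; None works here because the context
# is only passed through to callable attachments.
from zrb.task.llm.prompt import get_attachments

print(get_attachments(None, None))                            # []
print(get_attachments(None, "inline note"))                   # ['inline note']
print(get_attachments(None, ["note a", "note b"]))            # ['note a', 'note b']
print(get_attachments(None, lambda _ctx: "from a callable"))  # ['from a callable']
```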
zrb/task/llm/subagent_conversation_history.py ADDED
@@ -0,0 +1,41 @@
+from zrb.context.any_context import AnyContext
+from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.typing import ListOfDict
+from zrb.xcom.xcom import Xcom
+
+
+def inject_subagent_conversation_history_into_ctx(
+    ctx: AnyContext, conversation_history: ConversationHistory
+):
+    subagent_messages_xcom = _get_global_subagent_history_xcom(ctx)
+    existing_subagent_history = subagent_messages_xcom.get({})
+    subagent_messages_xcom.set(
+        {**existing_subagent_history, **conversation_history.subagent_history}
+    )
+
+
+def extract_subagent_conversation_history_from_ctx(
+    ctx: AnyContext,
+) -> dict[str, ListOfDict]:
+    subagent_messsages_xcom = _get_global_subagent_history_xcom(ctx)
+    return subagent_messsages_xcom.get({})
+
+
+def get_ctx_subagent_history(ctx: AnyContext, subagent_name: str) -> ListOfDict:
+    subagent_history = extract_subagent_conversation_history_from_ctx(ctx)
+    return subagent_history.get(subagent_name, [])
+
+
+def set_ctx_subagent_history(ctx: AnyContext, subagent_name: str, messages: ListOfDict):
+    subagent_history = extract_subagent_conversation_history_from_ctx(ctx)
+    subagent_history[subagent_name] = messages
+    subagent_messages_xcom = _get_global_subagent_history_xcom(ctx)
+    subagent_messages_xcom.set(subagent_history)
+
+
+def _get_global_subagent_history_xcom(ctx: AnyContext) -> Xcom:
+    if "_global_subagents" not in ctx.xcom:
+        ctx.xcom["_global_subagents"] = Xcom([{}])
+    if not isinstance(ctx.xcom["_global_subagents"], Xcom):
+        raise ValueError("ctx.xcom._global_subagents must be an Xcom")
+    return ctx.xcom["_global_subagents"]
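
This new module keeps per-sub-agent transcripts in a shared `_global_subagents` Xcom on the context. A hedged sketch of the round-trip, assuming `Xcom.get`/`Xcom.set` act as a simple read/write of the latest value (which is how the module above uses them); the `SimpleNamespace` context is a hypothetical stand-in that only provides the `.xcom` mapping these helpers touch.

```python
# Hedged sketch: exercising the subagent history helpers with a stand-in context.
from types import SimpleNamespace

from zrb.task.llm.subagent_conversation_history import (
    get_ctx_subagent_history,
    set_ctx_subagent_history,
)

ctx = SimpleNamespace(xcom={})  # hypothetical minimal context, not a real AnyContext

set_ctx_subagent_history(ctx, "researcher", [{"role": "user", "content": "hi"}])
print(get_ctx_subagent_history(ctx, "researcher"))  # [{'role': 'user', 'content': 'hi'}]
print(get_ctx_subagent_history(ctx, "writer"))      # []
```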
zrb/task/llm/tool_confirmation_completer.py ADDED
@@ -0,0 +1,41 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from prompt_toolkit.completion import Completer
+
+
+def get_tool_confirmation_completer(
+    options: list[str], meta_dict: dict[str, str]
+) -> "Completer":
+    from prompt_toolkit.completion import Completer, Completion
+
+    class ToolConfirmationCompleter(Completer):
+        """Custom completer for tool confirmation that doesn't auto-complete partial words."""
+
+        def __init__(self, options, meta_dict):
+            self.options = options
+            self.meta_dict = meta_dict
+
+        def get_completions(self, document, complete_event):
+            text = document.text.strip()
+            # 1. Input is empty, OR
+            # 2. Input exactly matches the beginning of an option
+            if text == "":
+                # Show all options when nothing is typed
+                for option in self.options:
+                    yield Completion(
+                        option,
+                        start_position=0,
+                        display_meta=self.meta_dict.get(option, ""),
+                    )
+                return
+            # Only complete if text exactly matches the beginning of an option
+            for option in self.options:
+                if option.startswith(text):
+                    yield Completion(
+                        option,
+                        start_position=-len(text),
+                        display_meta=self.meta_dict.get(option, ""),
+                    )
+
+    return ToolConfirmationCompleter(options, meta_dict)
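
The factory returns an ordinary prompt_toolkit `Completer`, so it can be attached to any `PromptSession`. A usage sketch follows; the option names and descriptions are illustrative, not the exact ones zrb's chat session registers.

```python
# Hedged sketch: wiring the new completer into prompt_toolkit.
# Options and meta descriptions are made up for illustration.
from prompt_toolkit import PromptSession

from zrb.task.llm.tool_confirmation_completer import get_tool_confirmation_completer

completer = get_tool_confirmation_completer(
    options=["yes", "no", "edit"],
    meta_dict={
        "yes": "Approve and run the tool call",
        "no": "Reject the tool call",
        "edit": "Adjust the arguments before running",
    },
)
session = PromptSession(completer=completer)
answer = session.prompt("Approve tool call? ")
```

Because completions only fire on an empty buffer or an exact prefix of an option, stray typing does not pop unrelated suggestions.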