zrb 1.15.3__py3-none-any.whl → 1.21.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of zrb might be problematic.

Files changed (108)
  1. zrb/__init__.py +2 -6
  2. zrb/attr/type.py +10 -7
  3. zrb/builtin/__init__.py +2 -0
  4. zrb/builtin/git.py +12 -1
  5. zrb/builtin/group.py +31 -15
  6. zrb/builtin/llm/attachment.py +40 -0
  7. zrb/builtin/llm/chat_completion.py +274 -0
  8. zrb/builtin/llm/chat_session.py +126 -167
  9. zrb/builtin/llm/chat_session_cmd.py +288 -0
  10. zrb/builtin/llm/chat_trigger.py +79 -0
  11. zrb/builtin/llm/history.py +4 -4
  12. zrb/builtin/llm/llm_ask.py +217 -135
  13. zrb/builtin/llm/tool/api.py +74 -70
  14. zrb/builtin/llm/tool/cli.py +35 -21
  15. zrb/builtin/llm/tool/code.py +55 -73
  16. zrb/builtin/llm/tool/file.py +278 -344
  17. zrb/builtin/llm/tool/note.py +84 -0
  18. zrb/builtin/llm/tool/rag.py +27 -34
  19. zrb/builtin/llm/tool/sub_agent.py +54 -41
  20. zrb/builtin/llm/tool/web.py +74 -98
  21. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  22. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  23. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  24. zrb/builtin/searxng/config/settings.yml +5671 -0
  25. zrb/builtin/searxng/start.py +21 -0
  26. zrb/builtin/shell/autocomplete/bash.py +4 -3
  27. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  28. zrb/config/config.py +202 -27
  29. zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
  30. zrb/config/default_prompt/interactive_system_prompt.md +24 -30
  31. zrb/config/default_prompt/persona.md +1 -1
  32. zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
  33. zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
  34. zrb/config/default_prompt/summarization_prompt.md +57 -16
  35. zrb/config/default_prompt/system_prompt.md +36 -30
  36. zrb/config/llm_config.py +119 -23
  37. zrb/config/llm_context/config.py +127 -90
  38. zrb/config/llm_context/config_parser.py +1 -7
  39. zrb/config/llm_context/workflow.py +81 -0
  40. zrb/config/llm_rate_limitter.py +100 -47
  41. zrb/context/any_shared_context.py +7 -1
  42. zrb/context/context.py +8 -2
  43. zrb/context/shared_context.py +3 -7
  44. zrb/group/any_group.py +3 -3
  45. zrb/group/group.py +3 -3
  46. zrb/input/any_input.py +5 -1
  47. zrb/input/base_input.py +18 -6
  48. zrb/input/option_input.py +13 -1
  49. zrb/input/text_input.py +7 -24
  50. zrb/runner/cli.py +21 -20
  51. zrb/runner/common_util.py +24 -19
  52. zrb/runner/web_route/task_input_api_route.py +5 -5
  53. zrb/runner/web_util/user.py +7 -3
  54. zrb/session/any_session.py +12 -6
  55. zrb/session/session.py +39 -18
  56. zrb/task/any_task.py +24 -3
  57. zrb/task/base/context.py +17 -9
  58. zrb/task/base/execution.py +15 -8
  59. zrb/task/base/lifecycle.py +8 -4
  60. zrb/task/base/monitoring.py +12 -7
  61. zrb/task/base_task.py +69 -5
  62. zrb/task/base_trigger.py +12 -5
  63. zrb/task/llm/agent.py +128 -167
  64. zrb/task/llm/agent_runner.py +152 -0
  65. zrb/task/llm/config.py +39 -20
  66. zrb/task/llm/conversation_history.py +110 -29
  67. zrb/task/llm/conversation_history_model.py +4 -179
  68. zrb/task/llm/default_workflow/coding/workflow.md +41 -0
  69. zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
  70. zrb/task/llm/default_workflow/git/workflow.md +118 -0
  71. zrb/task/llm/default_workflow/golang/workflow.md +128 -0
  72. zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
  73. zrb/task/llm/default_workflow/java/workflow.md +146 -0
  74. zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
  75. zrb/task/llm/default_workflow/python/workflow.md +160 -0
  76. zrb/task/llm/default_workflow/researching/workflow.md +153 -0
  77. zrb/task/llm/default_workflow/rust/workflow.md +162 -0
  78. zrb/task/llm/default_workflow/shell/workflow.md +299 -0
  79. zrb/task/llm/file_replacement.py +206 -0
  80. zrb/task/llm/file_tool_model.py +57 -0
  81. zrb/task/llm/history_processor.py +206 -0
  82. zrb/task/llm/history_summarization.py +2 -193
  83. zrb/task/llm/print_node.py +184 -64
  84. zrb/task/llm/prompt.py +175 -179
  85. zrb/task/llm/subagent_conversation_history.py +41 -0
  86. zrb/task/llm/tool_wrapper.py +226 -85
  87. zrb/task/llm/workflow.py +76 -0
  88. zrb/task/llm_task.py +109 -71
  89. zrb/task/make_task.py +2 -3
  90. zrb/task/rsync_task.py +25 -10
  91. zrb/task/scheduler.py +4 -4
  92. zrb/util/attr.py +54 -39
  93. zrb/util/cli/markdown.py +12 -0
  94. zrb/util/cli/text.py +30 -0
  95. zrb/util/file.py +12 -3
  96. zrb/util/git.py +2 -2
  97. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  98. zrb/util/string/conversion.py +1 -1
  99. zrb/util/truncate.py +23 -0
  100. zrb/util/yaml.py +204 -0
  101. zrb/xcom/xcom.py +10 -0
  102. {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/METADATA +38 -18
  103. {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/RECORD +105 -79
  104. {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/WHEEL +1 -1
  105. zrb/task/llm/default_workflow/coding.md +0 -24
  106. zrb/task/llm/default_workflow/copywriting.md +0 -17
  107. zrb/task/llm/default_workflow/researching.md +0 -18
  108. {zrb-1.15.3.dist-info → zrb-1.21.29.dist-info}/entry_points.txt +0 -0
zrb/task/llm/prompt.py CHANGED
@@ -5,20 +5,135 @@ from datetime import datetime, timezone
 from typing import TYPE_CHECKING, Callable
 
 from zrb.attr.type import StrAttr, StrListAttr
-from zrb.config.llm_config import llm_config as llm_config
-from zrb.config.llm_context.config import llm_context_config
+from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
-from zrb.context.any_shared_context import AnySharedContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.workflow import LLMWorkflow, get_available_workflows
 from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
 from zrb.util.file import read_dir, read_file_with_line_numbers
-from zrb.util.llm.prompt import make_prompt_section
+from zrb.util.markdown import make_markdown_section
 
 if TYPE_CHECKING:
     from pydantic_ai.messages import UserContent
 
 
-def get_persona(
+def get_system_and_user_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> tuple[str, str]:
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    new_user_message_prompt, apendixes = _get_user_message_prompt(user_message)
+    new_system_prompt = _construct_system_prompt(
+        ctx=ctx,
+        user_message=user_message,
+        apendixes=apendixes,
+        persona_attr=persona_attr,
+        render_persona=render_persona,
+        system_prompt_attr=system_prompt_attr,
+        render_system_prompt=render_system_prompt,
+        special_instruction_prompt_attr=special_instruction_prompt_attr,
+        render_special_instruction_prompt=render_special_instruction_prompt,
+        workflows_attr=workflows_attr,
+        render_workflows=render_workflows,
+        conversation_history=conversation_history,
+    )
+    return new_system_prompt, new_user_message_prompt
+
+
+def _construct_system_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    apendixes: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> str:
+    persona = _get_persona(ctx, persona_attr, render_persona)
+    base_system_prompt = _get_base_system_prompt(
+        ctx, system_prompt_attr, render_system_prompt
+    )
+    special_instruction_prompt = _get_special_instruction_prompt(
+        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
+    )
+    available_workflows = get_available_workflows()
+    active_workflow_names = set(
+        _get_active_workflow_names(ctx, workflows_attr, render_workflows)
+    )
+    active_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, True
+    )
+    inactive_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, False
+    )
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    current_directory = os.getcwd()
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    return "\n".join(
+        [
+            persona,
+            base_system_prompt,
+            make_markdown_section(
+                "📝 SPECIAL INSTRUCTION",
+                "\n".join(
+                    [
+                        special_instruction_prompt,
+                        active_workflow_prompt,
+                    ]
+                ),
+            ),
+            make_markdown_section("🛠️ AVAILABLE WORKFLOWS", inactive_workflow_prompt),
+            make_markdown_section(
+                "📚 CONTEXT",
+                "\n".join(
+                    [
+                        make_markdown_section(
+                            "ℹ️ System Information",
+                            "\n".join(
+                                [
+                                    f"- OS: {platform.system()} {platform.version()}",
+                                    f"- Python Version: {platform.python_version()}",
+                                    f"- Current Directory: {current_directory}",
+                                    f"- Current Time: {iso_date}",
+                                ]
+                            ),
+                        ),
+                        make_markdown_section(
+                            "🧠 Long Term Note Content",
+                            conversation_history.long_term_note,
+                        ),
+                        make_markdown_section(
+                            "📝 Contextual Note Content",
+                            conversation_history.contextual_note,
+                        ),
+                        make_markdown_section(
+                            "📄 Apendixes",
+                            apendixes,
+                        ),
+                    ]
+                ),
+            ),
+        ]
+    )
+
+
+def _get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
     render_persona: bool,
@@ -35,7 +150,7 @@ def get_persona(
     return llm_config.default_persona or ""
 
 
-def get_base_system_prompt(
+def _get_base_system_prompt(
     ctx: AnyContext,
     system_prompt_attr: StrAttr | None,
     render_system_prompt: bool,
@@ -52,7 +167,7 @@ def get_base_system_prompt(
     return llm_config.default_system_prompt or ""
 
 
-def get_special_instruction_prompt(
+def _get_special_instruction_prompt(
     ctx: AnyContext,
     special_instruction_prompt_attr: StrAttr | None,
     render_spcecial_instruction_prompt: bool,
@@ -69,164 +184,57 @@ def get_special_instruction_prompt(
     return llm_config.default_special_instruction_prompt
 
 
-def get_modes(
+def _get_active_workflow_names(
     ctx: AnyContext,
-    modes_attr: StrListAttr | None,
-    render_modes: bool,
+    workflows_attr: StrListAttr | None,
+    render_workflows: bool,
 ) -> list[str]:
-    """Gets the modes, prioritizing task-specific, then default."""
-    raw_modes = get_str_list_attr(
+    """Gets the workflows, prioritizing task-specific, then default."""
+    raw_workflows = get_str_list_attr(
         ctx,
-        [] if modes_attr is None else modes_attr,
-        auto_render=render_modes,
+        [] if workflows_attr is None else workflows_attr,
+        auto_render=render_workflows,
     )
-    if raw_modes is None:
-        raw_modes = []
-    modes = [mode.strip().lower() for mode in raw_modes if mode.strip() != ""]
-    if len(modes) > 0:
-        return modes
-    return llm_config.default_modes or []
+    if raw_workflows is not None and len(raw_workflows) > 0:
+        return [w.strip().lower() for w in raw_workflows if w.strip() != ""]
+    return []
 
 
-def get_workflow_prompt(
-    ctx: AnyContext,
-    modes_attr: StrListAttr | None,
-    render_modes: bool,
+def _get_workflow_prompt(
+    available_workflows: dict[str, LLMWorkflow],
+    active_workflow_names: list[str] | set[str],
+    select_active_workflow: bool,
 ) -> str:
-    builtin_workflow_dir = os.path.join(os.path.dirname(__file__), "default_workflow")
-    modes = set(get_modes(ctx, modes_attr, render_modes))
-
-    # Get user-defined workflows
-    workflows = {
-        workflow_name.strip().lower(): content
-        for workflow_name, content in llm_context_config.get_workflows().items()
-        if workflow_name.strip().lower() in modes
+    selected_workflows = {
+        workflow_name: available_workflows[workflow_name]
+        for workflow_name in available_workflows
+        if (workflow_name in active_workflow_names) == select_active_workflow
     }
-
-    # Get available builtin workflow names from the file system
-    available_builtin_workflow_names = set()
-    try:
-        for filename in os.listdir(builtin_workflow_dir):
-            if filename.endswith(".md"):
-                available_builtin_workflow_names.add(filename[:-3].lower())
-    except FileNotFoundError:
-        # Handle case where the directory might not exist
-        ctx.log_error(
-            f"Warning: Default workflow directory not found at {builtin_workflow_dir}"
-        )
-    except Exception as e:
-        # Catch other potential errors during directory listing
-        ctx.log_error(f"Error listing default workflows: {e}")
-
-    # Determine which builtin workflows are requested and not already loaded
-    requested_builtin_workflow_names = [
-        workflow_name
-        for workflow_name in available_builtin_workflow_names
-        if workflow_name in modes and workflow_name not in workflows
-    ]
-
-    # Add builtin-workflows if requested
-    if len(requested_builtin_workflow_names) > 0:
-        for workflow_name in requested_builtin_workflow_names:
-            workflow_file_path = os.path.join(
-                builtin_workflow_dir, f"{workflow_name}.md"
-            )
-            try:
-                with open(workflow_file_path, "r") as f:
-                    workflows[workflow_name] = f.read()
-            except FileNotFoundError:
-                ctx.log_error(
-                    f"Warning: Builtin workflow file not found: {workflow_file_path}"
-                )
-            except Exception as e:
-                ctx.log_error(f"Error reading builtin workflow {workflow_name}: {e}")
-
     return "\n".join(
         [
-            make_prompt_section(header.capitalize(), content)
-            for header, content in workflows.items()
-            if header.lower() in modes
-        ]
-    )
-
-
-def get_system_and_user_prompt(
-    ctx: AnyContext,
-    user_message: str,
-    persona_attr: StrAttr | None = None,
-    render_persona: bool = False,
-    system_prompt_attr: StrAttr | None = None,
-    render_system_prompt: bool = False,
-    special_instruction_prompt_attr: StrAttr | None = None,
-    render_special_instruction_prompt: bool = False,
-    modes_attr: StrListAttr | None = None,
-    render_modes: bool = False,
-    conversation_history: ConversationHistory | None = None,
-) -> tuple[str, str]:
-    """Combines persona, base system prompt, and special instructions."""
-    persona = get_persona(ctx, persona_attr, render_persona)
-    base_system_prompt = get_base_system_prompt(
-        ctx, system_prompt_attr, render_system_prompt
-    )
-    special_instruction_prompt = get_special_instruction_prompt(
-        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
-    )
-    workflow_prompt = get_workflow_prompt(ctx, modes_attr, render_modes)
-    if conversation_history is None:
-        conversation_history = ConversationHistory()
-    conversation_context, new_user_message = extract_conversation_context(user_message)
-    new_system_prompt = "\n".join(
-        [
-            make_prompt_section("Persona", persona),
-            make_prompt_section("System Prompt", base_system_prompt),
-            make_prompt_section("Special Instruction", special_instruction_prompt),
-            make_prompt_section("Special Workflows", workflow_prompt),
-            make_prompt_section(
-                "Past Conversation",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Summary",
-                            conversation_history.past_conversation_summary,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Last Transcript",
-                            conversation_history.past_conversation_transcript,
-                            as_code=True,
-                        ),
-                    ]
+            make_markdown_section(
+                workflow_name.capitalize(),
+                (
+                    (
+                        "> Workflow status: Automatically Loaded/Activated.\n"
+                        f"> Workflow location: `{workflow.path}`\n"
+                        "{workflow.content}"
+                    )
+                    if select_active_workflow
+                    else f"Workflow name: {workflow_name}\n{workflow.description}"
                 ),
-            ),
-            make_prompt_section(
-                "Notes",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Long Term",
-                            conversation_history.long_term_note,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Contextual",
-                            conversation_history.contextual_note,
-                            as_code=True,
-                        ),
-                    ]
-                ),
-            ),
-            make_prompt_section("Conversation Context", conversation_context),
+            )
+            for workflow_name, workflow in selected_workflows.items()
         ]
     )
-    return new_system_prompt, new_user_message
 
 
-def extract_conversation_context(user_message: str) -> tuple[str, str]:
-    modified_user_message = user_message
+def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
+    processed_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
     pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
     potential_resource_path = re.findall(pattern, user_message)
-    apendixes = []
+    apendix_list = []
     for i, ref in enumerate(potential_resource_path):
         resource_path = os.path.abspath(os.path.expanduser(ref))
         content = ""
@@ -239,44 +247,32 @@ def extract_conversation_context(user_message: str) -> tuple[str, str]:
             ref_type = "directory"
         if content != "":
             # Replace the @-reference in the user message with the placeholder
-            placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
-            modified_user_message = modified_user_message.replace(
+            placeholder = f"[Reference {i+1}: `{os.path.basename(ref)}`]"
+            processed_user_message = processed_user_message.replace(
                 f"@{ref}", placeholder, 1
             )
-            apendixes.append(
-                make_prompt_section(
-                    f"{placeholder} ({ref_type} path: `{resource_path}`)",
-                    content,
+            apendix_list.append(
+                make_markdown_section(
+                    f"Content of {placeholder} ({ref_type} path: `{resource_path}`)",
+                    "\n".join(content) if isinstance(content, list) else content,
                     as_code=True,
                 )
             )
-    conversation_context = "\n".join(
-        [
-            make_prompt_section("Current OS", platform.system()),
-            make_prompt_section("OS Version", platform.version()),
-            make_prompt_section("Python Version", platform.python_version()),
-        ]
-    )
-    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    apendixes = "\n".join(apendix_list)
     current_directory = os.getcwd()
-    modified_user_message = "\n".join(
-        [
-            make_prompt_section("User Message", modified_user_message),
-            make_prompt_section(
-                "Context",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Current working directory", current_directory
-                        ),
-                        make_prompt_section("Current time", iso_date),
-                        make_prompt_section("Apendixes", "\n".join(apendixes)),
-                    ]
-                ),
-            ),
-        ]
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    modified_user_message = make_markdown_section(
+        "User Request",
+        "\n".join(
+            [
+                f"- Current Directory: {current_directory}",
+                f"- Current Time: {iso_date}",
+                "---",
+                processed_user_message,
+            ]
+        ),
     )
-    return conversation_context, modified_user_message
+    return modified_user_message, apendixes
 
 
 def get_user_message(
@@ -309,7 +305,7 @@ def get_summarization_system_prompt(
 
 
 def get_attachments(
-    attachment: "UserContent | list[UserContent] | Callable[[AnySharedContext], UserContent | list[UserContent]] | None" = None, # noqa
+    attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None, # noqa
 ) -> "list[UserContent]":
     if attachment is None:
         return []
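
Note: the example below is not part of the diff. It is a minimal sketch of how a direct caller of the reworked prompt API in 1.21.x might migrate from the old `modes_attr`/`render_modes` keywords to `workflows_attr`/`render_workflows`, assuming an `AnyContext` supplied by a running task.

```python
# Hypothetical sketch (not from the diff): calling the 1.21.x prompt API.
from zrb.context.any_context import AnyContext
from zrb.task.llm.conversation_history_model import ConversationHistory
from zrb.task.llm.prompt import get_system_and_user_prompt


def build_prompts(ctx: AnyContext) -> tuple[str, str]:
    # `workflows_attr` replaces the old `modes_attr`. Any @path/with/slashes
    # reference in the message is swapped for a placeholder, and the file or
    # directory content is attached to the system prompt as an appendix.
    return get_system_and_user_prompt(
        ctx=ctx,
        user_message="Refactor @./zrb/task/llm/prompt.py and keep behaviour intact",
        workflows_attr=["coding", "python"],
        conversation_history=ConversationHistory(),
    )
```

Per the new `_construct_system_prompt`, workflow names that match an available workflow are rendered as active under "📝 SPECIAL INSTRUCTION", while the remaining available workflows are only listed under "🛠️ AVAILABLE WORKFLOWS".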
zrb/task/llm/subagent_conversation_history.py ADDED
@@ -0,0 +1,41 @@
+from zrb.context.any_context import AnyContext
+from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.typing import ListOfDict
+from zrb.xcom.xcom import Xcom
+
+
+def inject_subagent_conversation_history_into_ctx(
+    ctx: AnyContext, conversation_history: ConversationHistory
+):
+    subagent_messages_xcom = _get_global_subagent_history_xcom(ctx)
+    existing_subagent_history = subagent_messages_xcom.get({})
+    subagent_messages_xcom.set(
+        {**existing_subagent_history, **conversation_history.subagent_history}
+    )
+
+
+def extract_subagent_conversation_history_from_ctx(
+    ctx: AnyContext,
+) -> dict[str, ListOfDict]:
+    subagent_messsages_xcom = _get_global_subagent_history_xcom(ctx)
+    return subagent_messsages_xcom.get({})
+
+
+def get_ctx_subagent_history(ctx: AnyContext, subagent_name: str) -> ListOfDict:
+    subagent_history = extract_subagent_conversation_history_from_ctx(ctx)
+    return subagent_history.get(subagent_name, [])
+
+
+def set_ctx_subagent_history(ctx: AnyContext, subagent_name: str, messages: ListOfDict):
+    subagent_history = extract_subagent_conversation_history_from_ctx(ctx)
+    subagent_history[subagent_name] = messages
+    subagent_messages_xcom = _get_global_subagent_history_xcom(ctx)
+    subagent_messages_xcom.set(subagent_history)
+
+
+def _get_global_subagent_history_xcom(ctx: AnyContext) -> Xcom:
+    if "_global_subagents" not in ctx.xcom:
+        ctx.xcom["_global_subagents"] = Xcom([{}])
+    if not isinstance(ctx.xcom["_global_subagents"], Xcom):
+        raise ValueError("ctx.xcom._global_subagents must be an Xcom")
+    return ctx.xcom["_global_subagents"]
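
Note: the snippet below is likewise not part of the diff. It is a hypothetical usage sketch for the new xcom-backed subagent history helpers, assuming the module path `zrb.task.llm.subagent_conversation_history` from entry 85 of the file list above.

```python
# Hypothetical sketch (not from the diff): round-tripping one subagent transcript.
from zrb.context.any_context import AnyContext
from zrb.task.llm.subagent_conversation_history import (
    get_ctx_subagent_history,
    set_ctx_subagent_history,
)


def remember_turn(ctx: AnyContext) -> None:
    # Read the "researcher" subagent's transcript, append one message,
    # and write it back into ctx.xcom["_global_subagents"].
    messages = get_ctx_subagent_history(ctx, "researcher")
    messages.append({"role": "user", "content": "Summarize today's findings."})
    set_ctx_subagent_history(ctx, "researcher", messages)
```

Because the transcripts live in `ctx.xcom["_global_subagents"]`, every helper that receives the same context reads and writes the same per-subagent message lists.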