zrb 1.13.1__py3-none-any.whl → 1.21.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. zrb/__init__.py +2 -6
  2. zrb/attr/type.py +8 -8
  3. zrb/builtin/__init__.py +2 -0
  4. zrb/builtin/group.py +31 -15
  5. zrb/builtin/http.py +7 -8
  6. zrb/builtin/llm/attachment.py +40 -0
  7. zrb/builtin/llm/chat_session.py +130 -144
  8. zrb/builtin/llm/chat_session_cmd.py +226 -0
  9. zrb/builtin/llm/chat_trigger.py +73 -0
  10. zrb/builtin/llm/history.py +4 -4
  11. zrb/builtin/llm/llm_ask.py +218 -110
  12. zrb/builtin/llm/tool/api.py +74 -62
  13. zrb/builtin/llm/tool/cli.py +35 -16
  14. zrb/builtin/llm/tool/code.py +49 -47
  15. zrb/builtin/llm/tool/file.py +262 -251
  16. zrb/builtin/llm/tool/note.py +84 -0
  17. zrb/builtin/llm/tool/rag.py +25 -18
  18. zrb/builtin/llm/tool/sub_agent.py +29 -22
  19. zrb/builtin/llm/tool/web.py +135 -143
  20. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  21. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  22. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  23. zrb/builtin/searxng/config/settings.yml +5671 -0
  24. zrb/builtin/searxng/start.py +21 -0
  25. zrb/builtin/setup/latex/ubuntu.py +1 -0
  26. zrb/builtin/setup/ubuntu.py +1 -1
  27. zrb/builtin/shell/autocomplete/bash.py +4 -3
  28. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  29. zrb/config/config.py +255 -78
  30. zrb/config/default_prompt/file_extractor_system_prompt.md +109 -9
  31. zrb/config/default_prompt/interactive_system_prompt.md +24 -30
  32. zrb/config/default_prompt/persona.md +1 -1
  33. zrb/config/default_prompt/repo_extractor_system_prompt.md +31 -31
  34. zrb/config/default_prompt/repo_summarizer_system_prompt.md +27 -8
  35. zrb/config/default_prompt/summarization_prompt.md +8 -13
  36. zrb/config/default_prompt/system_prompt.md +36 -30
  37. zrb/config/llm_config.py +129 -24
  38. zrb/config/llm_context/config.py +127 -90
  39. zrb/config/llm_context/config_parser.py +1 -7
  40. zrb/config/llm_context/workflow.py +81 -0
  41. zrb/config/llm_rate_limitter.py +89 -45
  42. zrb/context/any_shared_context.py +7 -1
  43. zrb/context/context.py +8 -2
  44. zrb/context/shared_context.py +6 -8
  45. zrb/group/any_group.py +12 -5
  46. zrb/group/group.py +67 -3
  47. zrb/input/any_input.py +5 -1
  48. zrb/input/base_input.py +18 -6
  49. zrb/input/text_input.py +7 -24
  50. zrb/runner/cli.py +21 -20
  51. zrb/runner/common_util.py +24 -19
  52. zrb/runner/web_route/task_input_api_route.py +5 -5
  53. zrb/runner/web_route/task_session_api_route.py +1 -4
  54. zrb/runner/web_util/user.py +7 -3
  55. zrb/session/any_session.py +12 -6
  56. zrb/session/session.py +39 -18
  57. zrb/task/any_task.py +24 -3
  58. zrb/task/base/context.py +17 -9
  59. zrb/task/base/execution.py +15 -8
  60. zrb/task/base/lifecycle.py +8 -4
  61. zrb/task/base/monitoring.py +12 -7
  62. zrb/task/base_task.py +69 -5
  63. zrb/task/base_trigger.py +12 -5
  64. zrb/task/llm/agent.py +138 -52
  65. zrb/task/llm/config.py +45 -13
  66. zrb/task/llm/conversation_history.py +76 -6
  67. zrb/task/llm/conversation_history_model.py +0 -168
  68. zrb/task/llm/default_workflow/coding/workflow.md +41 -0
  69. zrb/task/llm/default_workflow/copywriting/workflow.md +68 -0
  70. zrb/task/llm/default_workflow/git/workflow.md +118 -0
  71. zrb/task/llm/default_workflow/golang/workflow.md +128 -0
  72. zrb/task/llm/default_workflow/html-css/workflow.md +135 -0
  73. zrb/task/llm/default_workflow/java/workflow.md +146 -0
  74. zrb/task/llm/default_workflow/javascript/workflow.md +158 -0
  75. zrb/task/llm/default_workflow/python/workflow.md +160 -0
  76. zrb/task/llm/default_workflow/researching/workflow.md +153 -0
  77. zrb/task/llm/default_workflow/rust/workflow.md +162 -0
  78. zrb/task/llm/default_workflow/shell/workflow.md +299 -0
  79. zrb/task/llm/file_replacement.py +206 -0
  80. zrb/task/llm/file_tool_model.py +57 -0
  81. zrb/task/llm/history_summarization.py +22 -35
  82. zrb/task/llm/history_summarization_tool.py +24 -0
  83. zrb/task/llm/print_node.py +182 -63
  84. zrb/task/llm/prompt.py +213 -153
  85. zrb/task/llm/tool_wrapper.py +210 -53
  86. zrb/task/llm/workflow.py +76 -0
  87. zrb/task/llm_task.py +98 -47
  88. zrb/task/make_task.py +2 -3
  89. zrb/task/rsync_task.py +25 -10
  90. zrb/task/scheduler.py +4 -4
  91. zrb/util/attr.py +50 -40
  92. zrb/util/cli/markdown.py +12 -0
  93. zrb/util/cli/text.py +30 -0
  94. zrb/util/file.py +27 -11
  95. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  96. zrb/util/string/conversion.py +1 -1
  97. zrb/util/truncate.py +23 -0
  98. zrb/util/yaml.py +204 -0
  99. {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/METADATA +40 -20
  100. {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/RECORD +102 -79
  101. {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/WHEEL +1 -1
  102. zrb/task/llm/default_workflow/coding.md +0 -24
  103. zrb/task/llm/default_workflow/copywriting.md +0 -17
  104. zrb/task/llm/default_workflow/researching.md +0 -18
  105. {zrb-1.13.1.dist-info → zrb-1.21.17.dist-info}/entry_points.txt +0 -0
zrb/task/llm/prompt.py CHANGED
@@ -2,18 +2,153 @@ import os
 import platform
 import re
 from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Callable
 
 from zrb.attr.type import StrAttr, StrListAttr
-from zrb.config.llm_config import llm_config as llm_config
-from zrb.config.llm_context.config import llm_context_config
+from zrb.config.llm_config import llm_config
 from zrb.context.any_context import AnyContext
 from zrb.task.llm.conversation_history_model import ConversationHistory
+from zrb.task.llm.workflow import LLMWorkflow, get_available_workflows
 from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
 from zrb.util.file import read_dir, read_file_with_line_numbers
-from zrb.util.llm.prompt import make_prompt_section
+from zrb.util.markdown import make_markdown_section
 
+if TYPE_CHECKING:
+    from pydantic_ai.messages import UserContent
 
-def get_persona(
+
+def get_system_and_user_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> tuple[str, str]:
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    new_user_message_prompt, apendixes = _get_user_message_prompt(user_message)
+    new_system_prompt = _construct_system_prompt(
+        ctx=ctx,
+        user_message=user_message,
+        apendixes=apendixes,
+        persona_attr=persona_attr,
+        render_persona=render_persona,
+        system_prompt_attr=system_prompt_attr,
+        render_system_prompt=render_system_prompt,
+        special_instruction_prompt_attr=special_instruction_prompt_attr,
+        render_special_instruction_prompt=render_special_instruction_prompt,
+        workflows_attr=workflows_attr,
+        render_workflows=render_workflows,
+        conversation_history=conversation_history,
+    )
+    return new_system_prompt, new_user_message_prompt
+
+
+def _construct_system_prompt(
+    ctx: AnyContext,
+    user_message: str,
+    apendixes: str,
+    persona_attr: StrAttr | None = None,
+    render_persona: bool = False,
+    system_prompt_attr: StrAttr | None = None,
+    render_system_prompt: bool = False,
+    special_instruction_prompt_attr: StrAttr | None = None,
+    render_special_instruction_prompt: bool = False,
+    workflows_attr: StrListAttr | None = None,
+    render_workflows: bool = False,
+    conversation_history: ConversationHistory | None = None,
+) -> str:
+    persona = _get_persona(ctx, persona_attr, render_persona)
+    base_system_prompt = _get_base_system_prompt(
+        ctx, system_prompt_attr, render_system_prompt
+    )
+    special_instruction_prompt = _get_special_instruction_prompt(
+        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
+    )
+    available_workflows = get_available_workflows()
+    active_workflow_names = set(
+        _get_active_workflow_names(ctx, workflows_attr, render_workflows)
+    )
+    active_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, True
+    )
+    inactive_workflow_prompt = _get_workflow_prompt(
+        available_workflows, active_workflow_names, False
+    )
+    if conversation_history is None:
+        conversation_history = ConversationHistory()
+    current_directory = os.getcwd()
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    return "\n".join(
+        [
+            persona,
+            base_system_prompt,
+            make_markdown_section(
+                "📝 SPECIAL INSTRUCTION",
+                "\n".join(
+                    [
+                        special_instruction_prompt,
+                        active_workflow_prompt,
+                    ]
+                ),
+            ),
+            make_markdown_section("🛠️ AVAILABLE WORKFLOWS", inactive_workflow_prompt),
+            make_markdown_section(
+                "📚 CONTEXT",
+                "\n".join(
+                    [
+                        make_markdown_section(
+                            "ℹ️ System Information",
+                            "\n".join(
+                                [
+                                    f"- OS: {platform.system()} {platform.version()}",
+                                    f"- Python Version: {platform.python_version()}",
+                                    f"- Current Directory: {current_directory}",
+                                    f"- Current Time: {iso_date}",
+                                ]
+                            ),
+                        ),
+                        make_markdown_section(
+                            "🧠 Long Term Note",
+                            conversation_history.long_term_note,
+                        ),
+                        make_markdown_section(
+                            "📝 Contextual Note",
+                            conversation_history.contextual_note,
+                        ),
+                        make_markdown_section(
+                            "📄 Apendixes",
+                            apendixes,
+                        ),
+                    ]
+                ),
+            ),
+            make_markdown_section(
+                "💬 PAST CONVERSATION",
+                "\n".join(
+                    [
+                        make_markdown_section(
+                            "Narrative Summary",
+                            conversation_history.past_conversation_summary,
+                        ),
+                        make_markdown_section(
+                            "Past Transcript",
+                            conversation_history.past_conversation_transcript,
+                        ),
+                    ]
+                ),
+            ),
+        ]
+    )
+
+
+def _get_persona(
     ctx: AnyContext,
     persona_attr: StrAttr | None,
     render_persona: bool,
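For orientation, the hunk above replaces the flat `make_prompt_section` layout from 1.13.1 with a nested, markdown-sectioned system prompt. The sketch below mirrors only the top-level structure that `_construct_system_prompt` now emits, using placeholder strings. `make_markdown_section` here is a simplified stand-in for the real helper in `zrb/util/markdown.py` (whose exact output format, and its `as_code` flag, are not shown in this hunk), so treat the rendering as an assumption rather than zrb's implementation.

```python
def make_markdown_section(header: str, content: str) -> str:
    # Simplified stand-in: a heading line followed by the section body.
    return f"# {header}\n{content}\n"


# Top-level layout mirrored from _construct_system_prompt, with placeholders.
system_prompt = "\n".join(
    [
        "<persona>",
        "<base system prompt>",
        make_markdown_section(
            "📝 SPECIAL INSTRUCTION", "<special instruction + active workflow bodies>"
        ),
        make_markdown_section(
            "🛠️ AVAILABLE WORKFLOWS", "<names and descriptions of inactive workflows>"
        ),
        make_markdown_section(
            "📚 CONTEXT",
            "\n".join(
                [
                    make_markdown_section("ℹ️ System Information", "<OS, Python, cwd, time>"),
                    make_markdown_section("🧠 Long Term Note", "<long term note>"),
                    make_markdown_section("📝 Contextual Note", "<contextual note>"),
                    make_markdown_section("📄 Apendixes", "<@-referenced file contents>"),
                ]
            ),
        ),
        make_markdown_section(
            "💬 PAST CONVERSATION",
            "\n".join(
                [
                    make_markdown_section("Narrative Summary", "<summary>"),
                    make_markdown_section("Past Transcript", "<transcript>"),
                ]
            ),
        ),
    ]
)
print(system_prompt)
```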
@@ -30,7 +165,7 @@ def get_persona(
     return llm_config.default_persona or ""
 
 
-def get_base_system_prompt(
+def _get_base_system_prompt(
     ctx: AnyContext,
     system_prompt_attr: StrAttr | None,
     render_system_prompt: bool,
@@ -47,7 +182,7 @@ def get_base_system_prompt(
     return llm_config.default_system_prompt or ""
 
 
-def get_special_instruction_prompt(
+def _get_special_instruction_prompt(
    ctx: AnyContext,
    special_instruction_prompt_attr: StrAttr | None,
    render_spcecial_instruction_prompt: bool,
@@ -64,138 +199,57 @@ def get_special_instruction_prompt(
     return llm_config.default_special_instruction_prompt
 
 
-def get_modes(
+def _get_active_workflow_names(
     ctx: AnyContext,
-    modes_attr: StrAttr | None,
-    render_modes: bool,
-) -> str:
-    """Gets the modes, prioritizing task-specific, then default."""
-    raw_modes = get_str_list_attr(
+    workflows_attr: StrListAttr | None,
+    render_workflows: bool,
+) -> list[str]:
+    """Gets the workflows, prioritizing task-specific, then default."""
+    raw_workflows = get_str_list_attr(
         ctx,
-        modes_attr,
-        auto_render=render_modes,
+        [] if workflows_attr is None else workflows_attr,
+        auto_render=render_workflows,
     )
-    if raw_modes is None:
-        raw_modes = []
-    modes = [mode.strip() for mode in raw_modes if mode.strip() != ""]
-    if len(modes) > 0:
-        return modes
-    return llm_config.default_modes or []
+    if raw_workflows is not None and len(raw_workflows) > 0:
+        return [w.strip().lower() for w in raw_workflows if w.strip() != ""]
+    return []
 
 
-def get_workflow_prompt(
-    ctx: AnyContext,
-    modes_attr: StrAttr | None,
-    render_modes: bool,
+def _get_workflow_prompt(
+    available_workflows: dict[str, LLMWorkflow],
+    active_workflow_names: list[str] | set[str],
+    select_active_workflow: bool,
 ) -> str:
-    modes = get_modes(ctx, modes_attr, render_modes)
-    # Get user-defined workflows
-    workflows = {
-        workflow_name: content
-        for workflow_name, content in llm_context_config.get_workflows().items()
-        if workflow_name in modes
+    selected_workflows = {
+        workflow_name: available_workflows[workflow_name]
+        for workflow_name in available_workflows
+        if (workflow_name in active_workflow_names) == select_active_workflow
     }
-    # Get requested builtin-workflow names
-    requested_builtin_workflow_names = [
-        workflow_name
-        for workflow_name in ("coding", "copywriting", "researching")
-        if workflow_name in modes and workflow_name not in workflows
-    ]
-    # add builtin-workflows if requested
-    if len(requested_builtin_workflow_names) > 0:
-        dir_path = os.path.dirname(__file__)
-        for workflow_name in requested_builtin_workflow_names:
-            workflow_file_path = os.path.join(
-                dir_path, "default_workflow", f"{workflow_name}.md"
-            )
-            with open(workflow_file_path, "r") as f:
-                workflows[workflow_name] = f.read()
     return "\n".join(
         [
-            make_prompt_section(header.capitalize(), content)
-            for header, content in workflows.items()
-            if header.lower() in modes
-        ]
-    )
-
-
-def get_system_and_user_prompt(
-    ctx: AnyContext,
-    user_message: str,
-    persona_attr: StrAttr | None = None,
-    render_persona: bool = False,
-    system_prompt_attr: StrAttr | None = None,
-    render_system_prompt: bool = False,
-    special_instruction_prompt_attr: StrAttr | None = None,
-    render_special_instruction_prompt: bool = False,
-    modes_attr: StrListAttr | None = None,
-    render_modes: bool = False,
-    conversation_history: ConversationHistory | None = None,
-) -> tuple[str, str]:
-    """Combines persona, base system prompt, and special instructions."""
-    persona = get_persona(ctx, persona_attr, render_persona)
-    base_system_prompt = get_base_system_prompt(
-        ctx, system_prompt_attr, render_system_prompt
-    )
-    special_instruction_prompt = get_special_instruction_prompt(
-        ctx, special_instruction_prompt_attr, render_special_instruction_prompt
-    )
-    workflow_prompt = get_workflow_prompt(ctx, modes_attr, render_modes)
-    if conversation_history is None:
-        conversation_history = ConversationHistory()
-    conversation_context, new_user_message = extract_conversation_context(user_message)
-    new_system_prompt = "\n".join(
-        [
-            make_prompt_section("Persona", persona),
-            make_prompt_section("System Prompt", base_system_prompt),
-            make_prompt_section("Special Instruction", special_instruction_prompt),
-            make_prompt_section("Special Workflows", workflow_prompt),
-            make_prompt_section(
-                "Past Conversation",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Summary",
-                            conversation_history.past_conversation_summary,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Last Transcript",
-                            conversation_history.past_conversation_transcript,
-                            as_code=True,
-                        ),
-                    ]
-                ),
-            ),
-            make_prompt_section(
-                "Notes",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Long Term",
-                            conversation_history.long_term_note,
-                            as_code=True,
-                        ),
-                        make_prompt_section(
-                            "Contextual",
-                            conversation_history.contextual_note,
-                            as_code=True,
-                        ),
-                    ]
+            make_markdown_section(
+                workflow_name.capitalize(),
+                (
+                    (
+                        "> Workflow status: Automatically Loaded/Activated.\n"
+                        f"> Workflow location: `{workflow.path}`\n"
+                        "{workflow.content}"
+                    )
+                    if select_active_workflow
+                    else f"Workflow name: {workflow_name}\n{workflow.description}"
                 ),
-            ),
-            make_prompt_section("Conversation Context", conversation_context),
+            )
+            for workflow_name, workflow in selected_workflows.items()
         ]
     )
-    return new_system_prompt, new_user_message
 
 
-def extract_conversation_context(user_message: str) -> tuple[str, str]:
-    modified_user_message = user_message
+def _get_user_message_prompt(user_message: str) -> tuple[str, str]:
+    processed_user_message = user_message
     # Match “@” + any non-space/comma sequence that contains at least one “/”
-    pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\s]+)"
+    pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
     potential_resource_path = re.findall(pattern, user_message)
-    apendixes = []
+    apendix_list = []
     for i, ref in enumerate(potential_resource_path):
         resource_path = os.path.abspath(os.path.expanduser(ref))
         content = ""
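Within this hunk, the only behavioral change to the `@`-reference detection is the tightened regular expression in `_get_user_message_prompt`: the capture group now also excludes `?` and `!`, so trailing punctuation in a chat message is no longer swallowed into the referenced path. A small standalone illustration (the example message is invented):

```python
import re

# Pattern from zrb 1.13.1: anything up to a comma or whitespace was captured.
OLD_PATTERN = r"(?<!\w)@(?=[^,\s]*\/)([^,\s]+)"
# Pattern from zrb 1.21.17: '?' and '!' now also end the capture, so sentence
# punctuation is not treated as part of the referenced path.
NEW_PATTERN = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"

message = "Could you summarize @~/notes/todo.md?"

print(re.findall(OLD_PATTERN, message))  # ['~/notes/todo.md?']
print(re.findall(NEW_PATTERN, message))  # ['~/notes/todo.md']
```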
@@ -208,44 +262,32 @@ def extract_conversation_context(user_message: str) -> tuple[str, str]:
             ref_type = "directory"
         if content != "":
             # Replace the @-reference in the user message with the placeholder
-            placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
-            modified_user_message = modified_user_message.replace(
+            placeholder = f"[Reference {i+1}: `{os.path.basename(ref)}`]"
+            processed_user_message = processed_user_message.replace(
                 f"@{ref}", placeholder, 1
             )
-            apendixes.append(
-                make_prompt_section(
-                    f"{placeholder} ({ref_type} path: `{resource_path}`)",
-                    content,
+            apendix_list.append(
+                make_markdown_section(
+                    f"Content of {placeholder} ({ref_type} path: `{resource_path}`)",
+                    "\n".join(content) if isinstance(content, list) else content,
                     as_code=True,
                 )
             )
-    conversation_context = "\n".join(
-        [
-            make_prompt_section("Current OS", platform.system()),
-            make_prompt_section("OS Version", platform.version()),
-            make_prompt_section("Python Version", platform.python_version()),
-        ]
-    )
-    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    apendixes = "\n".join(apendix_list)
     current_directory = os.getcwd()
-    modified_user_message = "\n".join(
-        [
-            make_prompt_section("User Message", modified_user_message),
-            make_prompt_section(
-                "Context",
-                "\n".join(
-                    [
-                        make_prompt_section(
-                            "Current working directory", current_directory
-                        ),
-                        make_prompt_section("Current time", iso_date),
-                        make_prompt_section("Apendixes", "\n".join(apendixes)),
-                    ]
-                ),
-            ),
-        ]
+    iso_date = datetime.now(timezone.utc).astimezone().isoformat()
+    modified_user_message = make_markdown_section(
+        "User Request",
+        "\n".join(
+            [
+                f"- Current Directory: {current_directory}",
+                f"- Current Time: {iso_date}",
+                "---",
+                processed_user_message,
+            ]
+        ),
    )
-    return conversation_context, modified_user_message
+    return modified_user_message, apendixes
 
 
 def get_user_message(
@@ -274,3 +316,21 @@ def get_summarization_system_prompt(
     if summarization_prompt is not None:
         return summarization_prompt
     return llm_config.default_summarization_prompt
+
+
+def get_attachments(
+    ctx: AnyContext,
+    attachment: "UserContent | list[UserContent] | Callable[[AnyContext], UserContent | list[UserContent]] | None" = None,  # noqa
+) -> "list[UserContent]":
+    if attachment is None:
+        return []
+    if callable(attachment):
+        result = attachment(ctx)
+        if result is None:
+            return []
+        if isinstance(result, list):
+            return result
+        return [result]
+    if isinstance(attachment, list):
+        return attachment
+    return [attachment]
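The new `get_attachments` helper above normalizes whatever the task is given (nothing, a single `UserContent`, a list, or a callable that produces either) into a plain list. The dependency-free sketch below transcribes those branches with plain strings standing in for pydantic-ai's `UserContent` and a bare object standing in for `AnyContext`, purely to illustrate the accepted shapes; it is not zrb's code.

```python
from typing import Callable, Union

# Plain strings stand in for pydantic-ai's UserContent in this illustration.
Attachment = Union[
    str, list[str], Callable[[object], Union[str, list[str], None]], None
]


def normalize_attachment(ctx: object, attachment: Attachment = None) -> list[str]:
    # Mirrors the branches of get_attachments shown in the hunk above.
    if attachment is None:
        return []
    if callable(attachment):
        result = attachment(ctx)  # callables receive the task context
        if result is None:
            return []
        return result if isinstance(result, list) else [result]
    if isinstance(attachment, list):
        return attachment
    return [attachment]


print(normalize_attachment(object(), None))                         # []
print(normalize_attachment(object(), "report.pdf"))                 # ['report.pdf']
print(normalize_attachment(object(), ["a.png", "b.png"]))           # ['a.png', 'b.png']
print(normalize_attachment(object(), lambda ctx: "generated.txt"))  # ['generated.txt']
```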