zrb 1.15.3__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of zrb might be problematic.

Files changed (204)
  1. zrb/__init__.py +118 -133
  2. zrb/attr/type.py +10 -7
  3. zrb/builtin/__init__.py +55 -1
  4. zrb/builtin/git.py +12 -1
  5. zrb/builtin/group.py +31 -15
  6. zrb/builtin/llm/chat.py +147 -0
  7. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/entity/add_entity_util.py +7 -7
  8. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/add_module_util.py +5 -5
  9. zrb/builtin/project/add/fastapp/fastapp_util.py +1 -1
  10. zrb/builtin/searxng/config/settings.yml +5671 -0
  11. zrb/builtin/searxng/start.py +21 -0
  12. zrb/builtin/shell/autocomplete/bash.py +4 -3
  13. zrb/builtin/shell/autocomplete/zsh.py +4 -3
  14. zrb/callback/callback.py +8 -1
  15. zrb/cmd/cmd_result.py +2 -1
  16. zrb/config/config.py +555 -169
  17. zrb/config/helper.py +84 -0
  18. zrb/config/web_auth_config.py +50 -35
  19. zrb/context/any_shared_context.py +20 -3
  20. zrb/context/context.py +39 -5
  21. zrb/context/print_fn.py +13 -0
  22. zrb/context/shared_context.py +17 -8
  23. zrb/group/any_group.py +3 -3
  24. zrb/group/group.py +3 -3
  25. zrb/input/any_input.py +5 -1
  26. zrb/input/base_input.py +18 -6
  27. zrb/input/option_input.py +41 -1
  28. zrb/input/text_input.py +7 -24
  29. zrb/llm/agent/__init__.py +9 -0
  30. zrb/llm/agent/agent.py +215 -0
  31. zrb/llm/agent/summarizer.py +20 -0
  32. zrb/llm/app/__init__.py +10 -0
  33. zrb/llm/app/completion.py +281 -0
  34. zrb/llm/app/confirmation/allow_tool.py +66 -0
  35. zrb/llm/app/confirmation/handler.py +178 -0
  36. zrb/llm/app/confirmation/replace_confirmation.py +77 -0
  37. zrb/llm/app/keybinding.py +34 -0
  38. zrb/llm/app/layout.py +117 -0
  39. zrb/llm/app/lexer.py +155 -0
  40. zrb/llm/app/redirection.py +28 -0
  41. zrb/llm/app/style.py +16 -0
  42. zrb/llm/app/ui.py +733 -0
  43. zrb/llm/config/__init__.py +4 -0
  44. zrb/llm/config/config.py +122 -0
  45. zrb/llm/config/limiter.py +247 -0
  46. zrb/llm/history_manager/__init__.py +4 -0
  47. zrb/llm/history_manager/any_history_manager.py +23 -0
  48. zrb/llm/history_manager/file_history_manager.py +91 -0
  49. zrb/llm/history_processor/summarizer.py +108 -0
  50. zrb/llm/note/__init__.py +3 -0
  51. zrb/llm/note/manager.py +122 -0
  52. zrb/llm/prompt/__init__.py +29 -0
  53. zrb/llm/prompt/claude_compatibility.py +92 -0
  54. zrb/llm/prompt/compose.py +55 -0
  55. zrb/llm/prompt/default.py +51 -0
  56. zrb/llm/prompt/markdown/file_extractor.md +112 -0
  57. zrb/llm/prompt/markdown/mandate.md +23 -0
  58. zrb/llm/prompt/markdown/persona.md +3 -0
  59. zrb/llm/prompt/markdown/repo_extractor.md +112 -0
  60. zrb/llm/prompt/markdown/repo_summarizer.md +29 -0
  61. zrb/llm/prompt/markdown/summarizer.md +21 -0
  62. zrb/llm/prompt/note.py +41 -0
  63. zrb/llm/prompt/system_context.py +46 -0
  64. zrb/llm/prompt/zrb.py +41 -0
  65. zrb/llm/skill/__init__.py +3 -0
  66. zrb/llm/skill/manager.py +86 -0
  67. zrb/llm/task/__init__.py +4 -0
  68. zrb/llm/task/llm_chat_task.py +316 -0
  69. zrb/llm/task/llm_task.py +245 -0
  70. zrb/llm/tool/__init__.py +39 -0
  71. zrb/llm/tool/bash.py +75 -0
  72. zrb/llm/tool/code.py +266 -0
  73. zrb/llm/tool/file.py +419 -0
  74. zrb/llm/tool/note.py +70 -0
  75. zrb/{builtin/llm → llm}/tool/rag.py +33 -37
  76. zrb/llm/tool/search/brave.py +53 -0
  77. zrb/llm/tool/search/searxng.py +47 -0
  78. zrb/llm/tool/search/serpapi.py +47 -0
  79. zrb/llm/tool/skill.py +19 -0
  80. zrb/llm/tool/sub_agent.py +70 -0
  81. zrb/llm/tool/web.py +97 -0
  82. zrb/llm/tool/zrb_task.py +66 -0
  83. zrb/llm/util/attachment.py +101 -0
  84. zrb/llm/util/prompt.py +104 -0
  85. zrb/llm/util/stream_response.py +178 -0
  86. zrb/runner/cli.py +21 -20
  87. zrb/runner/common_util.py +24 -19
  88. zrb/runner/web_route/task_input_api_route.py +5 -5
  89. zrb/runner/web_util/user.py +7 -3
  90. zrb/session/any_session.py +12 -9
  91. zrb/session/session.py +38 -17
  92. zrb/task/any_task.py +24 -3
  93. zrb/task/base/context.py +42 -22
  94. zrb/task/base/execution.py +67 -55
  95. zrb/task/base/lifecycle.py +14 -7
  96. zrb/task/base/monitoring.py +12 -7
  97. zrb/task/base_task.py +113 -50
  98. zrb/task/base_trigger.py +16 -6
  99. zrb/task/cmd_task.py +6 -0
  100. zrb/task/http_check.py +11 -5
  101. zrb/task/make_task.py +5 -3
  102. zrb/task/rsync_task.py +30 -10
  103. zrb/task/scaffolder.py +7 -4
  104. zrb/task/scheduler.py +7 -4
  105. zrb/task/tcp_check.py +6 -4
  106. zrb/util/ascii_art/art/bee.txt +17 -0
  107. zrb/util/ascii_art/art/cat.txt +9 -0
  108. zrb/util/ascii_art/art/ghost.txt +16 -0
  109. zrb/util/ascii_art/art/panda.txt +17 -0
  110. zrb/util/ascii_art/art/rose.txt +14 -0
  111. zrb/util/ascii_art/art/unicorn.txt +15 -0
  112. zrb/util/ascii_art/banner.py +92 -0
  113. zrb/util/attr.py +54 -39
  114. zrb/util/cli/markdown.py +32 -0
  115. zrb/util/cli/text.py +30 -0
  116. zrb/util/cmd/command.py +33 -10
  117. zrb/util/file.py +61 -33
  118. zrb/util/git.py +2 -2
  119. zrb/util/{llm/prompt.py → markdown.py} +2 -3
  120. zrb/util/match.py +78 -0
  121. zrb/util/run.py +3 -3
  122. zrb/util/string/conversion.py +1 -1
  123. zrb/util/truncate.py +23 -0
  124. zrb/util/yaml.py +204 -0
  125. zrb/xcom/xcom.py +10 -0
  126. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/METADATA +41 -27
  127. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/RECORD +129 -131
  128. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/WHEEL +1 -1
  129. zrb/attr/__init__.py +0 -0
  130. zrb/builtin/llm/chat_session.py +0 -311
  131. zrb/builtin/llm/history.py +0 -71
  132. zrb/builtin/llm/input.py +0 -27
  133. zrb/builtin/llm/llm_ask.py +0 -187
  134. zrb/builtin/llm/previous-session.js +0 -21
  135. zrb/builtin/llm/tool/__init__.py +0 -0
  136. zrb/builtin/llm/tool/api.py +0 -71
  137. zrb/builtin/llm/tool/cli.py +0 -38
  138. zrb/builtin/llm/tool/code.py +0 -254
  139. zrb/builtin/llm/tool/file.py +0 -626
  140. zrb/builtin/llm/tool/sub_agent.py +0 -137
  141. zrb/builtin/llm/tool/web.py +0 -195
  142. zrb/builtin/project/__init__.py +0 -0
  143. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/__init__.py +0 -0
  144. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/template/app_template/module/my_module/service/__init__.py +0 -0
  145. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/common/__init__.py +0 -0
  146. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/__init__.py +0 -0
  147. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/__init__.py +0 -0
  148. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/permission/__init__.py +0 -0
  149. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/__init__.py +0 -0
  150. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/__init__.py +0 -0
  151. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/__init__.py +0 -0
  152. zrb/builtin/project/create/__init__.py +0 -0
  153. zrb/builtin/shell/__init__.py +0 -0
  154. zrb/builtin/shell/autocomplete/__init__.py +0 -0
  155. zrb/callback/__init__.py +0 -0
  156. zrb/cmd/__init__.py +0 -0
  157. zrb/config/default_prompt/file_extractor_system_prompt.md +0 -12
  158. zrb/config/default_prompt/interactive_system_prompt.md +0 -35
  159. zrb/config/default_prompt/persona.md +0 -1
  160. zrb/config/default_prompt/repo_extractor_system_prompt.md +0 -112
  161. zrb/config/default_prompt/repo_summarizer_system_prompt.md +0 -10
  162. zrb/config/default_prompt/summarization_prompt.md +0 -16
  163. zrb/config/default_prompt/system_prompt.md +0 -32
  164. zrb/config/llm_config.py +0 -243
  165. zrb/config/llm_context/config.py +0 -129
  166. zrb/config/llm_context/config_parser.py +0 -46
  167. zrb/config/llm_rate_limitter.py +0 -137
  168. zrb/content_transformer/__init__.py +0 -0
  169. zrb/context/__init__.py +0 -0
  170. zrb/dot_dict/__init__.py +0 -0
  171. zrb/env/__init__.py +0 -0
  172. zrb/group/__init__.py +0 -0
  173. zrb/input/__init__.py +0 -0
  174. zrb/runner/__init__.py +0 -0
  175. zrb/runner/web_route/__init__.py +0 -0
  176. zrb/runner/web_route/home_page/__init__.py +0 -0
  177. zrb/session/__init__.py +0 -0
  178. zrb/session_state_log/__init__.py +0 -0
  179. zrb/session_state_logger/__init__.py +0 -0
  180. zrb/task/__init__.py +0 -0
  181. zrb/task/base/__init__.py +0 -0
  182. zrb/task/llm/__init__.py +0 -0
  183. zrb/task/llm/agent.py +0 -243
  184. zrb/task/llm/config.py +0 -103
  185. zrb/task/llm/conversation_history.py +0 -128
  186. zrb/task/llm/conversation_history_model.py +0 -242
  187. zrb/task/llm/default_workflow/coding.md +0 -24
  188. zrb/task/llm/default_workflow/copywriting.md +0 -17
  189. zrb/task/llm/default_workflow/researching.md +0 -18
  190. zrb/task/llm/error.py +0 -95
  191. zrb/task/llm/history_summarization.py +0 -216
  192. zrb/task/llm/print_node.py +0 -101
  193. zrb/task/llm/prompt.py +0 -325
  194. zrb/task/llm/tool_wrapper.py +0 -220
  195. zrb/task/llm/typing.py +0 -3
  196. zrb/task/llm_task.py +0 -341
  197. zrb/task_status/__init__.py +0 -0
  198. zrb/util/__init__.py +0 -0
  199. zrb/util/cli/__init__.py +0 -0
  200. zrb/util/cmd/__init__.py +0 -0
  201. zrb/util/codemod/__init__.py +0 -0
  202. zrb/util/string/__init__.py +0 -0
  203. zrb/xcom/__init__.py +0 -0
  204. {zrb-1.15.3.dist-info → zrb-2.0.0a4.dist-info}/entry_points.txt +0 -0
zrb/task/llm/print_node.py DELETED
@@ -1,101 +0,0 @@
- from collections.abc import Callable
- from typing import Any
-
- from zrb.util.cli.style import stylize_faint
-
-
- async def print_node(print_func: Callable, agent_run: Any, node: Any):
-     """Prints the details of an agent execution node using a provided print function."""
-     from pydantic_ai import Agent
-     from pydantic_ai.messages import (
-         FinalResultEvent,
-         FunctionToolCallEvent,
-         FunctionToolResultEvent,
-         PartDeltaEvent,
-         PartStartEvent,
-         TextPartDelta,
-         ThinkingPartDelta,
-         ToolCallPartDelta,
-     )
-
-     if Agent.is_user_prompt_node(node):
-         print_func(stylize_faint(" 🔠 Receiving input..."))
-     elif Agent.is_model_request_node(node):
-         # A model request node => We can stream tokens from the model's request
-         print_func(stylize_faint(" 🧠 Processing..."))
-         # Reference: https://ai.pydantic.dev/agents/#streaming
-         async with node.stream(agent_run.ctx) as request_stream:
-             is_streaming = False
-             async for event in request_stream:
-                 if isinstance(event, PartStartEvent) and event.part:
-                     if is_streaming:
-                         print_func("")
-                     content = _get_event_part_content(event)
-                     print_func(stylize_faint(f" {content}"), end="")
-                     is_streaming = False
-                 elif isinstance(event, PartDeltaEvent):
-                     if isinstance(event.delta, TextPartDelta) or isinstance(
-                         event.delta, ThinkingPartDelta
-                     ):
-                         print_func(
-                             stylize_faint(f"{event.delta.content_delta}"),
-                             end="",
-                         )
-                     elif isinstance(event.delta, ToolCallPartDelta):
-                         print_func(
-                             stylize_faint(f"{event.delta.args_delta}"),
-                             end="",
-                         )
-                     is_streaming = True
-                 elif isinstance(event, FinalResultEvent) and event.tool_name:
-                     if is_streaming:
-                         print_func("")
-                     print_func(
-                         stylize_faint(f" Result: tool_name={event.tool_name}"),
-                     )
-                     is_streaming = False
-             if is_streaming:
-                 print_func("")
-     elif Agent.is_call_tools_node(node):
-         # A handle-response node => The model returned some data, potentially calls a tool
-         print_func(stylize_faint(" 🧰 Calling Tool..."))
-         async with node.stream(agent_run.ctx) as handle_stream:
-             async for event in handle_stream:
-                 if isinstance(event, FunctionToolCallEvent):
-                     # Handle empty arguments across different providers
-                     if event.part.args == "" or event.part.args is None:
-                         event.part.args = {}
-                     elif isinstance(
-                         event.part.args, str
-                     ) and event.part.args.strip() in ["null", "{}"]:
-                         # Some providers might send "null" or "{}" as a string
-                         event.part.args = {}
-                     # Handle dummy property if present (from our schema sanitization)
-                     if (
-                         isinstance(event.part.args, dict)
-                         and "_dummy" in event.part.args
-                     ):
-                         del event.part.args["_dummy"]
-                     print_func(
-                         stylize_faint(
-                             f" {event.part.tool_call_id} | "
-                             f"Call {event.part.tool_name} {event.part.args}"
-                         )
-                     )
-                 elif isinstance(event, FunctionToolResultEvent):
-                     print_func(
-                         stylize_faint(
-                             f" {event.tool_call_id} | {event.result.content}"
-                         )
-                     )
-     elif Agent.is_end_node(node):
-         # Once an End node is reached, the agent run is complete
-         print_func(stylize_faint(" ✅ Completed..."))
-
-
- def _get_event_part_content(event: Any) -> str:
-     if not hasattr(event, "part"):
-         return f"{event}"
-     if not hasattr(event.part, "content"):
-         return f"{event.part}"
-     return getattr(event.part, "content")
zrb/task/llm/prompt.py DELETED
@@ -1,325 +0,0 @@
- import os
- import platform
- import re
- from datetime import datetime, timezone
- from typing import TYPE_CHECKING, Callable
-
- from zrb.attr.type import StrAttr, StrListAttr
- from zrb.config.llm_config import llm_config as llm_config
- from zrb.config.llm_context.config import llm_context_config
- from zrb.context.any_context import AnyContext
- from zrb.context.any_shared_context import AnySharedContext
- from zrb.task.llm.conversation_history_model import ConversationHistory
- from zrb.util.attr import get_attr, get_str_attr, get_str_list_attr
- from zrb.util.file import read_dir, read_file_with_line_numbers
- from zrb.util.llm.prompt import make_prompt_section
-
- if TYPE_CHECKING:
-     from pydantic_ai.messages import UserContent
-
-
- def get_persona(
-     ctx: AnyContext,
-     persona_attr: StrAttr | None,
-     render_persona: bool,
- ) -> str:
-     """Gets the persona, prioritizing task-specific, then default."""
-     persona = get_attr(
-         ctx,
-         persona_attr,
-         None,
-         auto_render=render_persona,
-     )
-     if persona is not None:
-         return persona
-     return llm_config.default_persona or ""
-
-
- def get_base_system_prompt(
-     ctx: AnyContext,
-     system_prompt_attr: StrAttr | None,
-     render_system_prompt: bool,
- ) -> str:
-     """Gets the base system prompt, prioritizing task-specific, then default."""
-     system_prompt = get_attr(
-         ctx,
-         system_prompt_attr,
-         None,
-         auto_render=render_system_prompt,
-     )
-     if system_prompt is not None:
-         return system_prompt
-     return llm_config.default_system_prompt or ""
-
-
- def get_special_instruction_prompt(
-     ctx: AnyContext,
-     special_instruction_prompt_attr: StrAttr | None,
-     render_spcecial_instruction_prompt: bool,
- ) -> str:
-     """Gets the special instruction prompt, prioritizing task-specific, then default."""
-     special_instruction = get_attr(
-         ctx,
-         special_instruction_prompt_attr,
-         None,
-         auto_render=render_spcecial_instruction_prompt,
-     )
-     if special_instruction is not None:
-         return special_instruction
-     return llm_config.default_special_instruction_prompt
-
-
- def get_modes(
-     ctx: AnyContext,
-     modes_attr: StrListAttr | None,
-     render_modes: bool,
- ) -> list[str]:
-     """Gets the modes, prioritizing task-specific, then default."""
-     raw_modes = get_str_list_attr(
-         ctx,
-         [] if modes_attr is None else modes_attr,
-         auto_render=render_modes,
-     )
-     if raw_modes is None:
-         raw_modes = []
-     modes = [mode.strip().lower() for mode in raw_modes if mode.strip() != ""]
-     if len(modes) > 0:
-         return modes
-     return llm_config.default_modes or []
-
-
- def get_workflow_prompt(
-     ctx: AnyContext,
-     modes_attr: StrListAttr | None,
-     render_modes: bool,
- ) -> str:
-     builtin_workflow_dir = os.path.join(os.path.dirname(__file__), "default_workflow")
-     modes = set(get_modes(ctx, modes_attr, render_modes))
-
-     # Get user-defined workflows
-     workflows = {
-         workflow_name.strip().lower(): content
-         for workflow_name, content in llm_context_config.get_workflows().items()
-         if workflow_name.strip().lower() in modes
-     }
-
-     # Get available builtin workflow names from the file system
-     available_builtin_workflow_names = set()
-     try:
-         for filename in os.listdir(builtin_workflow_dir):
-             if filename.endswith(".md"):
-                 available_builtin_workflow_names.add(filename[:-3].lower())
-     except FileNotFoundError:
-         # Handle case where the directory might not exist
-         ctx.log_error(
-             f"Warning: Default workflow directory not found at {builtin_workflow_dir}"
-         )
-     except Exception as e:
-         # Catch other potential errors during directory listing
-         ctx.log_error(f"Error listing default workflows: {e}")
-
-     # Determine which builtin workflows are requested and not already loaded
-     requested_builtin_workflow_names = [
-         workflow_name
-         for workflow_name in available_builtin_workflow_names
-         if workflow_name in modes and workflow_name not in workflows
-     ]
-
-     # Add builtin-workflows if requested
-     if len(requested_builtin_workflow_names) > 0:
-         for workflow_name in requested_builtin_workflow_names:
-             workflow_file_path = os.path.join(
-                 builtin_workflow_dir, f"{workflow_name}.md"
-             )
-             try:
-                 with open(workflow_file_path, "r") as f:
-                     workflows[workflow_name] = f.read()
-             except FileNotFoundError:
-                 ctx.log_error(
-                     f"Warning: Builtin workflow file not found: {workflow_file_path}"
-                 )
-             except Exception as e:
-                 ctx.log_error(f"Error reading builtin workflow {workflow_name}: {e}")
-
-     return "\n".join(
-         [
-             make_prompt_section(header.capitalize(), content)
-             for header, content in workflows.items()
-             if header.lower() in modes
-         ]
-     )
-
-
- def get_system_and_user_prompt(
-     ctx: AnyContext,
-     user_message: str,
-     persona_attr: StrAttr | None = None,
-     render_persona: bool = False,
-     system_prompt_attr: StrAttr | None = None,
-     render_system_prompt: bool = False,
-     special_instruction_prompt_attr: StrAttr | None = None,
-     render_special_instruction_prompt: bool = False,
-     modes_attr: StrListAttr | None = None,
-     render_modes: bool = False,
-     conversation_history: ConversationHistory | None = None,
- ) -> tuple[str, str]:
-     """Combines persona, base system prompt, and special instructions."""
-     persona = get_persona(ctx, persona_attr, render_persona)
-     base_system_prompt = get_base_system_prompt(
-         ctx, system_prompt_attr, render_system_prompt
-     )
-     special_instruction_prompt = get_special_instruction_prompt(
-         ctx, special_instruction_prompt_attr, render_special_instruction_prompt
-     )
-     workflow_prompt = get_workflow_prompt(ctx, modes_attr, render_modes)
-     if conversation_history is None:
-         conversation_history = ConversationHistory()
-     conversation_context, new_user_message = extract_conversation_context(user_message)
-     new_system_prompt = "\n".join(
-         [
-             make_prompt_section("Persona", persona),
-             make_prompt_section("System Prompt", base_system_prompt),
-             make_prompt_section("Special Instruction", special_instruction_prompt),
-             make_prompt_section("Special Workflows", workflow_prompt),
-             make_prompt_section(
-                 "Past Conversation",
-                 "\n".join(
-                     [
-                         make_prompt_section(
-                             "Summary",
-                             conversation_history.past_conversation_summary,
-                             as_code=True,
-                         ),
-                         make_prompt_section(
-                             "Last Transcript",
-                             conversation_history.past_conversation_transcript,
-                             as_code=True,
-                         ),
-                     ]
-                 ),
-             ),
-             make_prompt_section(
-                 "Notes",
-                 "\n".join(
-                     [
-                         make_prompt_section(
-                             "Long Term",
-                             conversation_history.long_term_note,
-                             as_code=True,
-                         ),
-                         make_prompt_section(
-                             "Contextual",
-                             conversation_history.contextual_note,
-                             as_code=True,
-                         ),
-                     ]
-                 ),
-             ),
-             make_prompt_section("Conversation Context", conversation_context),
-         ]
-     )
-     return new_system_prompt, new_user_message
-
-
- def extract_conversation_context(user_message: str) -> tuple[str, str]:
-     modified_user_message = user_message
-     # Match “@” + any non-space/comma sequence that contains at least one “/”
-     pattern = r"(?<!\w)@(?=[^,\s]*\/)([^,\?\!\s]+)"
-     potential_resource_path = re.findall(pattern, user_message)
-     apendixes = []
-     for i, ref in enumerate(potential_resource_path):
-         resource_path = os.path.abspath(os.path.expanduser(ref))
-         content = ""
-         ref_type = ""
-         if os.path.isfile(resource_path):
-             content = read_file_with_line_numbers(resource_path)
-             ref_type = "file"
-         elif os.path.isdir(resource_path):
-             content = read_dir(resource_path)
-             ref_type = "directory"
-         if content != "":
-             # Replace the @-reference in the user message with the placeholder
-             placeholder = f"[Reference {i+1}: {os.path.basename(ref)}]"
-             modified_user_message = modified_user_message.replace(
-                 f"@{ref}", placeholder, 1
-             )
-             apendixes.append(
-                 make_prompt_section(
-                     f"{placeholder} ({ref_type} path: `{resource_path}`)",
-                     content,
-                     as_code=True,
-                 )
-             )
-     conversation_context = "\n".join(
-         [
-             make_prompt_section("Current OS", platform.system()),
-             make_prompt_section("OS Version", platform.version()),
-             make_prompt_section("Python Version", platform.python_version()),
-         ]
-     )
-     iso_date = datetime.now(timezone.utc).astimezone().isoformat()
-     current_directory = os.getcwd()
-     modified_user_message = "\n".join(
-         [
-             make_prompt_section("User Message", modified_user_message),
-             make_prompt_section(
-                 "Context",
-                 "\n".join(
-                     [
-                         make_prompt_section(
-                             "Current working directory", current_directory
-                         ),
-                         make_prompt_section("Current time", iso_date),
-                         make_prompt_section("Apendixes", "\n".join(apendixes)),
-                     ]
-                 ),
-             ),
-         ]
-     )
-     return conversation_context, modified_user_message
-
-
- def get_user_message(
-     ctx: AnyContext,
-     message_attr: StrAttr | None,
-     render_user_message: bool,
- ) -> str:
-     """Gets the user message, rendering and providing a default."""
-     return get_str_attr(
-         ctx, message_attr, "How are you?", auto_render=render_user_message
-     )
-
-
- def get_summarization_system_prompt(
-     ctx: AnyContext,
-     summarization_prompt_attr: StrAttr | None,
-     render_summarization_prompt: bool,
- ) -> str:
-     """Gets the summarization prompt, rendering if configured and handling defaults."""
-     summarization_prompt = get_attr(
-         ctx,
-         summarization_prompt_attr,
-         None,
-         auto_render=render_summarization_prompt,
-     )
-     if summarization_prompt is not None:
-         return summarization_prompt
-     return llm_config.default_summarization_prompt
-
-
- def get_attachments(
-     ctx: AnyContext,
-     attachment: "UserContent | list[UserContent] | Callable[[AnySharedContext], UserContent | list[UserContent]] | None" = None, # noqa
- ) -> "list[UserContent]":
-     if attachment is None:
-         return []
-     if callable(attachment):
-         result = attachment(ctx)
-         if result is None:
-             return []
-         if isinstance(result, list):
-             return result
-         return [result]
-     if isinstance(attachment, list):
-         return attachment
-     return [attachment]
zrb/task/llm/tool_wrapper.py DELETED
@@ -1,220 +0,0 @@
- import functools
- import inspect
- import traceback
- import typing
- from collections.abc import Callable
- from typing import TYPE_CHECKING, Any
-
- from zrb.context.any_context import AnyContext
- from zrb.task.llm.error import ToolExecutionError
- from zrb.util.callable import get_callable_name
- from zrb.util.cli.style import (
-     stylize_blue,
-     stylize_error,
-     stylize_green,
-     stylize_yellow,
- )
- from zrb.util.run import run_async
- from zrb.util.string.conversion import to_boolean
-
- if TYPE_CHECKING:
-     from pydantic_ai import Tool
-
-
- def wrap_tool(func: Callable, ctx: AnyContext, is_yolo_mode: bool) -> "Tool":
-     """Wraps a tool function to handle exceptions and context propagation."""
-     from pydantic_ai import RunContext, Tool
-
-     original_sig = inspect.signature(func)
-     needs_run_context_for_pydantic = _has_context_parameter(original_sig, RunContext)
-     wrapper = wrap_func(func, ctx, is_yolo_mode)
-     return Tool(wrapper, takes_ctx=needs_run_context_for_pydantic)
-
-
- def wrap_func(func: Callable, ctx: AnyContext, is_yolo_mode: bool) -> Callable:
-     original_sig = inspect.signature(func)
-     needs_any_context_for_injection = _has_context_parameter(original_sig, AnyContext)
-     takes_no_args = len(original_sig.parameters) == 0
-     # Pass individual flags to the wrapper creator
-     wrapper = _create_wrapper(
-         func, original_sig, ctx, needs_any_context_for_injection, is_yolo_mode
-     )
-     _adjust_signature(wrapper, original_sig, takes_no_args)
-     return wrapper
-
-
- def _has_context_parameter(original_sig: inspect.Signature, context_type: type) -> bool:
-     """
-     Checks if the function signature includes a parameter with the specified
-     context type annotation.
-     """
-     return any(
-         _is_annotated_with_context(param.annotation, context_type)
-         for param in original_sig.parameters.values()
-     )
-
-
- def _is_annotated_with_context(param_annotation, context_type):
-     """
-     Checks if the parameter annotation is the specified context type
-     or a generic type containing it (e.g., Optional[ContextType]).
-     """
-     if param_annotation is inspect.Parameter.empty:
-         return False
-     if param_annotation is context_type:
-         return True
-     # Check for generic types like Optional[ContextType] or Union[ContextType, ...]
-     origin = typing.get_origin(param_annotation)
-     args = typing.get_args(param_annotation)
-     if origin is not None and args:
-         # Check if context_type is one of the arguments of the generic type
-         return any(arg is context_type for arg in args)
-     return False
-
-
- def _create_wrapper(
-     func: Callable,
-     original_sig: inspect.Signature,
-     ctx: AnyContext,
-     needs_any_context_for_injection: bool,
-     is_yolo_mode: bool,
- ) -> Callable:
-     """Creates the core wrapper function."""
-
-     @functools.wraps(func)
-     async def wrapper(*args, **kwargs):
-         # Identify AnyContext parameter name from the original signature if needed
-         any_context_param_name = None
-         if needs_any_context_for_injection:
-             for param in original_sig.parameters.values():
-                 if _is_annotated_with_context(param.annotation, AnyContext):
-                     any_context_param_name = param.name
-                     break # Found it, no need to continue
-             if any_context_param_name is None:
-                 # This should not happen if needs_any_context_for_injection is True,
-                 # but check for safety
-                 raise ValueError(
-                     "AnyContext parameter name not found in function signature."
-                 )
-             # Inject the captured ctx into kwargs. This will overwrite if the LLM
-             # somehow provided it.
-             kwargs[any_context_param_name] = ctx
-         # If the dummy argument was added for schema generation and is present in kwargs,
-         # remove it before calling the original function, unless the original function
-         # actually expects a parameter named '_dummy'.
-         if "_dummy" in kwargs and "_dummy" not in original_sig.parameters:
-             del kwargs["_dummy"]
-         try:
-             if not is_yolo_mode and not ctx.is_web_mode and ctx.is_tty:
-                 approval, reason = await _ask_for_approval(ctx, func, args, kwargs)
-                 if not approval:
-                     raise ValueError(f"User disapproving: {reason}")
-             return await run_async(func(*args, **kwargs))
-         except Exception as e:
-             error_model = ToolExecutionError(
-                 tool_name=func.__name__,
-                 error_type=type(e).__name__,
-                 message=str(e),
-                 details=traceback.format_exc(),
-             )
-             return error_model.model_dump_json()
-
-     return wrapper
-
-
- async def _ask_for_approval(
-     ctx: AnyContext, func: Callable, args: list[Any], kwargs: dict[str, Any]
- ) -> tuple[bool, str]:
-     func_name = get_callable_name(func)
-     normalized_args = [stylize_green(_truncate_arg(arg)) for arg in args]
-     normalized_kwargs = []
-     for key, val in kwargs.items():
-         truncated_val = _truncate_arg(f"{val}")
-         normalized_kwargs.append(
-             f"{stylize_yellow(key)}={stylize_green(truncated_val)}"
-         )
-     func_param_str = ",".join(normalized_args + normalized_kwargs)
-     func_call_str = (
-         f"{stylize_blue(func_name + '(')}{func_param_str}{stylize_blue(')')}"
-     )
-     while True:
-         ctx.print(
-             f"\n✅ >> Allow to run tool: {func_call_str} (Yes | No, <reason>) ",
-             plain=True,
-         )
-         user_input = await _read_line()
-         ctx.print("", plain=True)
-         user_responses = [val.strip() for val in user_input.split(",", maxsplit=1)]
-         while len(user_responses) < 2:
-             user_responses.append("")
-         approval_str, reason = user_responses
-         try:
-             approved = True if approval_str.strip() == "" else to_boolean(approval_str)
-             if not approved and reason == "":
-                 ctx.print(
-                     stylize_error(
-                         f"You must specify rejection reason (i.e., No, <why>) for {func_call_str}" # noqa
-                     ),
-                     plain=True,
-                 )
-                 continue
-             return approved, reason
-         except Exception:
-             ctx.print(
-                 stylize_error(
-                     f"Invalid approval value for {func_call_str}: {approval_str}"
-                 ),
-                 plain=True,
-             )
-             continue
-
-
- def _truncate_arg(arg: str, length: int = 19) -> str:
-     if len(arg) > length:
-         return f"{arg[:length-4]} ..."
-     return arg
-
-
- async def _read_line():
-     from prompt_toolkit import PromptSession
-
-     reader = PromptSession()
-     return await reader.prompt_async()
-
-
- def _adjust_signature(
-     wrapper: Callable, original_sig: inspect.Signature, takes_no_args: bool
- ):
-     """Adjusts the wrapper function's signature for schema generation."""
-     # The wrapper's signature should represent the arguments the *LLM* needs to provide.
-     # The LLM does not provide RunContext (pydantic-ai injects it) or AnyContext
-     # (we inject it). So, the wrapper's signature should be the original signature,
-     # minus any parameters annotated with RunContext or AnyContext.
-
-     from pydantic_ai import RunContext
-
-     params_for_schema = [
-         param
-         for param in original_sig.parameters.values()
-         if not _is_annotated_with_context(param.annotation, RunContext)
-         and not _is_annotated_with_context(param.annotation, AnyContext)
-     ]
-
-     # If after removing context parameters, there are no parameters left,
-     # and the original function took no args, keep the dummy.
-     # If after removing context parameters, there are no parameters left,
-     # but the original function *did* take args (only context), then the schema
-     # should have no parameters.
-     if not params_for_schema and takes_no_args:
-         # Keep the dummy if the original function truly had no parameters
-         new_sig = inspect.Signature(
-             parameters=[
-                 inspect.Parameter(
-                     "_dummy", inspect.Parameter.POSITIONAL_OR_KEYWORD, default=None
-                 )
-             ]
-         )
-     else:
-         new_sig = inspect.Signature(parameters=params_for_schema)
-
-     wrapper.__signature__ = new_sig
zrb/task/llm/typing.py DELETED
@@ -1,3 +0,0 @@
- from typing import Any
-
- ListOfDict = list[dict[str, Any]]