zrb 1.21.29__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of zrb might be problematic. Click here for more details.

Files changed (192)
  1. zrb/__init__.py +118 -129
  2. zrb/builtin/__init__.py +54 -2
  3. zrb/builtin/llm/chat.py +147 -0
  4. zrb/callback/callback.py +8 -1
  5. zrb/cmd/cmd_result.py +2 -1
  6. zrb/config/config.py +491 -280
  7. zrb/config/helper.py +84 -0
  8. zrb/config/web_auth_config.py +50 -35
  9. zrb/context/any_shared_context.py +13 -2
  10. zrb/context/context.py +31 -3
  11. zrb/context/print_fn.py +13 -0
  12. zrb/context/shared_context.py +14 -1
  13. zrb/input/option_input.py +30 -2
  14. zrb/llm/agent/__init__.py +9 -0
  15. zrb/llm/agent/agent.py +215 -0
  16. zrb/llm/agent/summarizer.py +20 -0
  17. zrb/llm/app/__init__.py +10 -0
  18. zrb/llm/app/completion.py +281 -0
  19. zrb/llm/app/confirmation/allow_tool.py +66 -0
  20. zrb/llm/app/confirmation/handler.py +178 -0
  21. zrb/llm/app/confirmation/replace_confirmation.py +77 -0
  22. zrb/llm/app/keybinding.py +34 -0
  23. zrb/llm/app/layout.py +117 -0
  24. zrb/llm/app/lexer.py +155 -0
  25. zrb/llm/app/redirection.py +28 -0
  26. zrb/llm/app/style.py +16 -0
  27. zrb/llm/app/ui.py +733 -0
  28. zrb/llm/config/__init__.py +4 -0
  29. zrb/llm/config/config.py +122 -0
  30. zrb/llm/config/limiter.py +247 -0
  31. zrb/llm/history_manager/__init__.py +4 -0
  32. zrb/llm/history_manager/any_history_manager.py +23 -0
  33. zrb/llm/history_manager/file_history_manager.py +91 -0
  34. zrb/llm/history_processor/summarizer.py +108 -0
  35. zrb/llm/note/__init__.py +3 -0
  36. zrb/llm/note/manager.py +122 -0
  37. zrb/llm/prompt/__init__.py +29 -0
  38. zrb/llm/prompt/claude_compatibility.py +92 -0
  39. zrb/llm/prompt/compose.py +55 -0
  40. zrb/llm/prompt/default.py +51 -0
  41. zrb/llm/prompt/markdown/mandate.md +23 -0
  42. zrb/llm/prompt/markdown/persona.md +3 -0
  43. zrb/llm/prompt/markdown/summarizer.md +21 -0
  44. zrb/llm/prompt/note.py +41 -0
  45. zrb/llm/prompt/system_context.py +46 -0
  46. zrb/llm/prompt/zrb.py +41 -0
  47. zrb/llm/skill/__init__.py +3 -0
  48. zrb/llm/skill/manager.py +86 -0
  49. zrb/llm/task/__init__.py +4 -0
  50. zrb/llm/task/llm_chat_task.py +316 -0
  51. zrb/llm/task/llm_task.py +245 -0
  52. zrb/llm/tool/__init__.py +39 -0
  53. zrb/llm/tool/bash.py +75 -0
  54. zrb/llm/tool/code.py +266 -0
  55. zrb/llm/tool/file.py +419 -0
  56. zrb/llm/tool/note.py +70 -0
  57. zrb/{builtin/llm → llm}/tool/rag.py +8 -5
  58. zrb/llm/tool/search/brave.py +53 -0
  59. zrb/llm/tool/search/searxng.py +47 -0
  60. zrb/llm/tool/search/serpapi.py +47 -0
  61. zrb/llm/tool/skill.py +19 -0
  62. zrb/llm/tool/sub_agent.py +70 -0
  63. zrb/llm/tool/web.py +97 -0
  64. zrb/llm/tool/zrb_task.py +66 -0
  65. zrb/llm/util/attachment.py +101 -0
  66. zrb/llm/util/prompt.py +104 -0
  67. zrb/llm/util/stream_response.py +178 -0
  68. zrb/session/any_session.py +0 -3
  69. zrb/session/session.py +1 -1
  70. zrb/task/base/context.py +25 -13
  71. zrb/task/base/execution.py +52 -47
  72. zrb/task/base/lifecycle.py +7 -4
  73. zrb/task/base_task.py +48 -49
  74. zrb/task/base_trigger.py +4 -1
  75. zrb/task/cmd_task.py +6 -0
  76. zrb/task/http_check.py +11 -5
  77. zrb/task/make_task.py +3 -0
  78. zrb/task/rsync_task.py +5 -0
  79. zrb/task/scaffolder.py +7 -4
  80. zrb/task/scheduler.py +3 -0
  81. zrb/task/tcp_check.py +6 -4
  82. zrb/util/ascii_art/art/bee.txt +17 -0
  83. zrb/util/ascii_art/art/cat.txt +9 -0
  84. zrb/util/ascii_art/art/ghost.txt +16 -0
  85. zrb/util/ascii_art/art/panda.txt +17 -0
  86. zrb/util/ascii_art/art/rose.txt +14 -0
  87. zrb/util/ascii_art/art/unicorn.txt +15 -0
  88. zrb/util/ascii_art/banner.py +92 -0
  89. zrb/util/cli/markdown.py +22 -2
  90. zrb/util/cmd/command.py +33 -10
  91. zrb/util/file.py +51 -32
  92. zrb/util/match.py +78 -0
  93. zrb/util/run.py +3 -3
  94. {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/METADATA +9 -15
  95. {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/RECORD +100 -128
  96. zrb/attr/__init__.py +0 -0
  97. zrb/builtin/llm/attachment.py +0 -40
  98. zrb/builtin/llm/chat_completion.py +0 -274
  99. zrb/builtin/llm/chat_session.py +0 -270
  100. zrb/builtin/llm/chat_session_cmd.py +0 -288
  101. zrb/builtin/llm/chat_trigger.py +0 -79
  102. zrb/builtin/llm/history.py +0 -71
  103. zrb/builtin/llm/input.py +0 -27
  104. zrb/builtin/llm/llm_ask.py +0 -269
  105. zrb/builtin/llm/previous-session.js +0 -21
  106. zrb/builtin/llm/tool/__init__.py +0 -0
  107. zrb/builtin/llm/tool/api.py +0 -75
  108. zrb/builtin/llm/tool/cli.py +0 -52
  109. zrb/builtin/llm/tool/code.py +0 -236
  110. zrb/builtin/llm/tool/file.py +0 -560
  111. zrb/builtin/llm/tool/note.py +0 -84
  112. zrb/builtin/llm/tool/sub_agent.py +0 -150
  113. zrb/builtin/llm/tool/web.py +0 -171
  114. zrb/builtin/project/__init__.py +0 -0
  115. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/__init__.py +0 -0
  116. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/_zrb/module/template/app_template/module/my_module/service/__init__.py +0 -0
  117. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/common/__init__.py +0 -0
  118. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/__init__.py +0 -0
  119. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/__init__.py +0 -0
  120. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/permission/__init__.py +0 -0
  121. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/role/__init__.py +0 -0
  122. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/module/auth/service/user/__init__.py +0 -0
  123. zrb/builtin/project/add/fastapp/fastapp_template/my_app_name/schema/__init__.py +0 -0
  124. zrb/builtin/project/create/__init__.py +0 -0
  125. zrb/builtin/shell/__init__.py +0 -0
  126. zrb/builtin/shell/autocomplete/__init__.py +0 -0
  127. zrb/callback/__init__.py +0 -0
  128. zrb/cmd/__init__.py +0 -0
  129. zrb/config/default_prompt/interactive_system_prompt.md +0 -29
  130. zrb/config/default_prompt/persona.md +0 -1
  131. zrb/config/default_prompt/summarization_prompt.md +0 -57
  132. zrb/config/default_prompt/system_prompt.md +0 -38
  133. zrb/config/llm_config.py +0 -339
  134. zrb/config/llm_context/config.py +0 -166
  135. zrb/config/llm_context/config_parser.py +0 -40
  136. zrb/config/llm_context/workflow.py +0 -81
  137. zrb/config/llm_rate_limitter.py +0 -190
  138. zrb/content_transformer/__init__.py +0 -0
  139. zrb/context/__init__.py +0 -0
  140. zrb/dot_dict/__init__.py +0 -0
  141. zrb/env/__init__.py +0 -0
  142. zrb/group/__init__.py +0 -0
  143. zrb/input/__init__.py +0 -0
  144. zrb/runner/__init__.py +0 -0
  145. zrb/runner/web_route/__init__.py +0 -0
  146. zrb/runner/web_route/home_page/__init__.py +0 -0
  147. zrb/session/__init__.py +0 -0
  148. zrb/session_state_log/__init__.py +0 -0
  149. zrb/session_state_logger/__init__.py +0 -0
  150. zrb/task/__init__.py +0 -0
  151. zrb/task/base/__init__.py +0 -0
  152. zrb/task/llm/__init__.py +0 -0
  153. zrb/task/llm/agent.py +0 -204
  154. zrb/task/llm/agent_runner.py +0 -152
  155. zrb/task/llm/config.py +0 -122
  156. zrb/task/llm/conversation_history.py +0 -209
  157. zrb/task/llm/conversation_history_model.py +0 -67
  158. zrb/task/llm/default_workflow/coding/workflow.md +0 -41
  159. zrb/task/llm/default_workflow/copywriting/workflow.md +0 -68
  160. zrb/task/llm/default_workflow/git/workflow.md +0 -118
  161. zrb/task/llm/default_workflow/golang/workflow.md +0 -128
  162. zrb/task/llm/default_workflow/html-css/workflow.md +0 -135
  163. zrb/task/llm/default_workflow/java/workflow.md +0 -146
  164. zrb/task/llm/default_workflow/javascript/workflow.md +0 -158
  165. zrb/task/llm/default_workflow/python/workflow.md +0 -160
  166. zrb/task/llm/default_workflow/researching/workflow.md +0 -153
  167. zrb/task/llm/default_workflow/rust/workflow.md +0 -162
  168. zrb/task/llm/default_workflow/shell/workflow.md +0 -299
  169. zrb/task/llm/error.py +0 -95
  170. zrb/task/llm/file_replacement.py +0 -206
  171. zrb/task/llm/file_tool_model.py +0 -57
  172. zrb/task/llm/history_processor.py +0 -206
  173. zrb/task/llm/history_summarization.py +0 -25
  174. zrb/task/llm/print_node.py +0 -221
  175. zrb/task/llm/prompt.py +0 -321
  176. zrb/task/llm/subagent_conversation_history.py +0 -41
  177. zrb/task/llm/tool_wrapper.py +0 -361
  178. zrb/task/llm/typing.py +0 -3
  179. zrb/task/llm/workflow.py +0 -76
  180. zrb/task/llm_task.py +0 -379
  181. zrb/task_status/__init__.py +0 -0
  182. zrb/util/__init__.py +0 -0
  183. zrb/util/cli/__init__.py +0 -0
  184. zrb/util/cmd/__init__.py +0 -0
  185. zrb/util/codemod/__init__.py +0 -0
  186. zrb/util/string/__init__.py +0 -0
  187. zrb/xcom/__init__.py +0 -0
  188. /zrb/{config/default_prompt/file_extractor_system_prompt.md → llm/prompt/markdown/file_extractor.md} +0 -0
  189. /zrb/{config/default_prompt/repo_extractor_system_prompt.md → llm/prompt/markdown/repo_extractor.md} +0 -0
  190. /zrb/{config/default_prompt/repo_summarizer_system_prompt.md → llm/prompt/markdown/repo_summarizer.md} +0 -0
  191. {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/WHEEL +0 -0
  192. {zrb-1.21.29.dist-info → zrb-2.0.0a4.dist-info}/entry_points.txt +0 -0
@@ -1,206 +0,0 @@
1
- import json
2
- import sys
3
- import traceback
4
- from typing import TYPE_CHECKING, Any, Callable, Coroutine
5
-
6
- from zrb.config.llm_config import llm_config
7
- from zrb.config.llm_rate_limitter import LLMRateLimitter
8
- from zrb.config.llm_rate_limitter import llm_rate_limitter as default_llm_rate_limitter
9
- from zrb.context.any_context import AnyContext
10
- from zrb.task.llm.agent_runner import run_agent_iteration
11
- from zrb.util.cli.style import stylize_faint
12
- from zrb.util.markdown import make_markdown_section
13
-
14
- if sys.version_info >= (3, 12):
15
- from typing import TypedDict
16
- else:
17
- from typing_extensions import TypedDict
18
-
19
-
20
- if TYPE_CHECKING:
21
- from pydantic_ai import ModelMessage
22
- from pydantic_ai.models import Model
23
- from pydantic_ai.settings import ModelSettings
24
-
25
-
26
- class SingleMessage(TypedDict):
27
- """
28
- SingleConversation
29
-
30
- Attributes:
31
- role: Either AI, User, Tool Call, or Tool Result
32
- time: yyyy-mm-ddTHH:MM:SSZ:
33
- content: The content of the message (summarize if too long)
34
- """
35
-
36
- role: str
37
- time: str
38
- content: str
39
-
40
-
41
- class ConversationSummary(TypedDict):
42
- """
43
- Conversation history
44
-
45
- Attributes:
46
- transcript: Several last transcript of the conversation
47
- summary: Descriptive conversation summary
48
- """
49
-
50
- transcript: list[SingleMessage]
51
- summary: str
52
-
53
-
54
- def save_conversation_summary(conversation_summary: ConversationSummary):
55
- """
56
- Write conversation summary for main assistant to continue conversation.
57
- """
58
- return conversation_summary
59
-
60
-
61
- def create_summarize_history_processor(
62
- ctx: AnyContext,
63
- system_prompt: str,
64
- rate_limitter: LLMRateLimitter | None = None,
65
- summarization_model: "Model | str | None" = None,
66
- summarization_model_settings: "ModelSettings | None" = None,
67
- summarization_system_prompt: str | None = None,
68
- summarization_token_threshold: int | None = None,
69
- summarization_retries: int = 2,
70
- ) -> Callable[[list["ModelMessage"]], Coroutine[None, None, list["ModelMessage"]]]:
71
- from pydantic_ai import Agent, ModelMessage, ModelRequest
72
- from pydantic_ai.messages import ModelMessagesTypeAdapter, UserPromptPart
73
-
74
- if rate_limitter is None:
75
- rate_limitter = default_llm_rate_limitter
76
- if summarization_model is None:
77
- summarization_model = llm_config.default_small_model
78
- if summarization_model_settings is None:
79
- summarization_model_settings = llm_config.default_small_model_settings
80
- if summarization_system_prompt is None:
81
- summarization_system_prompt = llm_config.default_summarization_prompt
82
- if summarization_token_threshold is None:
83
- summarization_token_threshold = (
84
- llm_config.default_history_summarization_token_threshold
85
- )
86
-
87
- async def maybe_summarize_history(
88
- messages: list[ModelMessage],
89
- ) -> list[ModelMessage]:
90
- history_list = json.loads(ModelMessagesTypeAdapter.dump_json(messages))
91
- history_json_str = json.dumps(history_list)
92
- # Estimate token usage
93
- # Note: Pydantic ai has run context parameter
94
- # (https://ai.pydantic.dev/message-history/#runcontext-parameter)
95
- # But we cannot use run_ctx.usage.total_tokens because total token keep increasing
96
- # even after summariztion.
97
- estimated_token_usage = rate_limitter.count_token(history_json_str)
98
- _print_request_info(
99
- ctx, estimated_token_usage, summarization_token_threshold, messages
100
- )
101
- if estimated_token_usage < summarization_token_threshold or len(messages) == 1:
102
- return messages
103
- history_list_without_instruction = [
104
- {
105
- key: obj[key]
106
- for key in obj
107
- if index == len(history_list) - 1 or key != "instructions"
108
- }
109
- for index, obj in enumerate(history_list)
110
- ]
111
- history_json_str_without_instruction = json.dumps(
112
- history_list_without_instruction
113
- )
114
- summarization_message = f"Summarize the following conversation: {history_json_str_without_instruction}"
115
- summarization_agent = Agent[None, ConversationSummary](
116
- model=summarization_model,
117
- output_type=save_conversation_summary,
118
- instructions=summarization_system_prompt,
119
- model_settings=summarization_model_settings,
120
- retries=summarization_retries,
121
- )
122
- try:
123
- _print_info(ctx, "📝 Rollup Conversation", 2)
124
- summary_run = await run_agent_iteration(
125
- ctx=ctx,
126
- agent=summarization_agent,
127
- user_prompt=summarization_message,
128
- attachments=[],
129
- history_list=[],
130
- rate_limitter=rate_limitter,
131
- log_indent_level=2,
132
- )
133
- if summary_run and summary_run.result and summary_run.result.output:
134
- usage = summary_run.result.usage()
135
- _print_info(ctx, f"📝 Rollup Conversation Token: {usage}", 2)
136
- ctx.print(plain=True)
137
- ctx.log_info("History summarized and updated.")
138
- condensed_message = make_markdown_section(
139
- header="Past Conversation",
140
- content="\n".join(
141
- [
142
- make_markdown_section(
143
- "Summary", _extract_summary(summary_run.result.output)
144
- ),
145
- make_markdown_section(
146
- "Past Trancript",
147
- _extract_transcript(summary_run.result.output),
148
- ),
149
- ]
150
- ),
151
- )
152
- return [
153
- ModelRequest(
154
- instructions=system_prompt,
155
- parts=[UserPromptPart(condensed_message)],
156
- )
157
- ]
158
- ctx.log_warning("History summarization failed or returned no data.")
159
- except BaseException as e:
160
- ctx.log_warning(f"Error during history summarization: {e}")
161
- traceback.print_exc()
162
- return messages
163
-
164
- return maybe_summarize_history
165
-
166
-
167
- def _print_request_info(
168
- ctx: AnyContext,
169
- estimated_token_usage: int,
170
- summarization_token_threshold: int,
171
- messages: list["ModelMessage"],
172
- ):
173
- _print_info(ctx, f"Current request token (estimated): {estimated_token_usage}")
174
- _print_info(ctx, f"Summarization token threshold: {summarization_token_threshold}")
175
- _print_info(ctx, f"History length: {len(messages)}")
176
-
177
-
178
- def _print_info(ctx: AnyContext, text: str, log_indent_level: int = 0):
179
- log_prefix = (2 * (log_indent_level + 1)) * " "
180
- ctx.print(stylize_faint(f"{log_prefix}{text}"), plain=True)
181
-
182
-
183
- def _extract_summary(summary_result_output: dict[str, Any] | str) -> str:
184
- summary = (
185
- summary_result_output.get("summary", "")
186
- if isinstance(summary_result_output, dict)
187
- else ""
188
- )
189
- return summary
190
-
191
-
192
- def _extract_transcript(summary_result_output: dict[str, Any] | str) -> str:
193
- transcript_list = (
194
- summary_result_output.get("transcript", [])
195
- if isinstance(summary_result_output, dict)
196
- else []
197
- )
198
- transcript_list = [] if not isinstance(transcript_list, list) else transcript_list
199
- return "\n".join(_format_transcript_message(message) for message in transcript_list)
200
-
201
-
202
- def _format_transcript_message(message: dict[str, str]) -> str:
203
- role = message.get("role", "Message")
204
- time = message.get("time", "<unknown>")
205
- content = message.get("content", "<empty>")
206
- return f"{role} ({time}): {content}"
@@ -1,25 +0,0 @@
1
- from zrb.attr.type import IntAttr
2
- from zrb.config.llm_config import llm_config
3
- from zrb.context.any_context import AnyContext
4
- from zrb.util.attr import get_int_attr
5
-
6
-
7
- def get_history_summarization_token_threshold(
8
- ctx: AnyContext,
9
- history_summarization_token_threshold_attr: IntAttr | None,
10
- render_history_summarization_token_threshold: bool,
11
- ) -> int:
12
- """Gets the history summarization token threshold, handling defaults and errors."""
13
- try:
14
- return get_int_attr(
15
- ctx,
16
- history_summarization_token_threshold_attr,
17
- llm_config.default_history_summarization_token_threshold,
18
- auto_render=render_history_summarization_token_threshold,
19
- )
20
- except ValueError as e:
21
- ctx.log_warning(
22
- f"Could not convert history_summarization_token_threshold to int: {e}. "
23
- "Defaulting to -1 (no threshold)."
24
- )
25
- return -1
@@ -1,221 +0,0 @@
1
- import json
2
- from collections.abc import Callable
3
- from typing import Any
4
-
5
- from zrb.config.config import CFG
6
- from zrb.util.cli.style import stylize_faint
7
-
8
-
9
- async def print_node(
10
- print_func: Callable, agent_run: Any, node: Any, log_indent_level: int = 0
11
- ):
12
- """Prints the details of an agent execution node using a provided print function."""
13
- from pydantic_ai import Agent
14
- from pydantic_ai.exceptions import UnexpectedModelBehavior
15
- from pydantic_ai.messages import (
16
- FinalResultEvent,
17
- FunctionToolCallEvent,
18
- FunctionToolResultEvent,
19
- PartDeltaEvent,
20
- PartStartEvent,
21
- TextPartDelta,
22
- ThinkingPartDelta,
23
- ToolCallPartDelta,
24
- )
25
-
26
- meta = getattr(node, "id", None) or getattr(node, "request_id", None)
27
- if Agent.is_user_prompt_node(node):
28
- print_func(_format_header("🔠 Receiving input...", log_indent_level))
29
- elif Agent.is_model_request_node(node):
30
- # A model request node => We can stream tokens from the model's request
31
- print_func(_format_header("🧠 Processing...", log_indent_level))
32
- # Reference: https://ai.pydantic.dev/agents/#streaming-all-events-and-output
33
- try:
34
- async with node.stream(agent_run.ctx) as request_stream:
35
- is_streaming = False
36
- async for event in request_stream:
37
- if isinstance(event, PartStartEvent) and event.part:
38
- if is_streaming:
39
- print_func("")
40
- content = _get_event_part_content(event)
41
- print_func(_format_content(content, log_indent_level), end="")
42
- is_streaming = True
43
- elif isinstance(event, PartDeltaEvent):
44
- if isinstance(event.delta, TextPartDelta):
45
- content_delta = event.delta.content_delta
46
- print_func(
47
- _format_stream_content(content_delta, log_indent_level),
48
- end="",
49
- )
50
- elif isinstance(event.delta, ThinkingPartDelta):
51
- content_delta = event.delta.content_delta
52
- print_func(
53
- _format_stream_content(content_delta, log_indent_level),
54
- end="",
55
- )
56
- elif isinstance(event.delta, ToolCallPartDelta):
57
- args_delta = event.delta.args_delta
58
- if isinstance(args_delta, dict):
59
- args_delta = json.dumps(args_delta)
60
- print_func(
61
- _format_stream_content(args_delta, log_indent_level),
62
- end="",
63
- )
64
- is_streaming = True
65
- elif isinstance(event, FinalResultEvent) and event.tool_name:
66
- if is_streaming:
67
- print_func("")
68
- tool_name = event.tool_name
69
- print_func(
70
- _format_content(
71
- f"Result: tool_name={tool_name}", log_indent_level
72
- )
73
- )
74
- is_streaming = False
75
- if is_streaming:
76
- print_func("")
77
- except UnexpectedModelBehavior as e:
78
- print_func("") # ensure newline consistency
79
- print_func(
80
- _format_content(
81
- (
82
- f"🟡 Unexpected Model Behavior: {e}. "
83
- f"Cause: {e.__cause__}. Node.Id: {meta}"
84
- ),
85
- log_indent_level,
86
- )
87
- )
88
- elif Agent.is_call_tools_node(node):
89
- # A handle-response node => The model returned some data, potentially calls a tool
90
- print_func(_format_header("🧰 Calling Tool...", log_indent_level))
91
- try:
92
- async with node.stream(agent_run.ctx) as handle_stream:
93
- async for event in handle_stream:
94
- if isinstance(event, FunctionToolCallEvent):
95
- args = _get_event_part_args(event)
96
- call_id = event.part.tool_call_id
97
- tool_name = event.part.tool_name
98
- print_func(
99
- _format_content(
100
- f"{call_id} | Call {tool_name} {args}", log_indent_level
101
- )
102
- )
103
- elif (
104
- isinstance(event, FunctionToolResultEvent)
105
- and event.tool_call_id
106
- ):
107
- call_id = event.tool_call_id
108
- if CFG.LLM_SHOW_TOOL_CALL_RESULT:
109
- result_content = event.result.content
110
- print_func(
111
- _format_content(
112
- f"{call_id} | Return {result_content}",
113
- log_indent_level,
114
- )
115
- )
116
- else:
117
- print_func(
118
- _format_content(
119
- f"{call_id} | Executed", log_indent_level
120
- )
121
- )
122
- except UnexpectedModelBehavior as e:
123
- print_func("") # ensure newline consistency
124
- print_func(
125
- _format_content(
126
- (
127
- f"🟡 Unexpected Model Behavior: {e}. "
128
- f"Cause: {e.__cause__}. Node.Id: {meta}"
129
- ),
130
- log_indent_level,
131
- )
132
- )
133
- elif Agent.is_end_node(node):
134
- # Once an End node is reached, the agent run is complete
135
- print_func(_format_header("✅ Completed...", log_indent_level))
136
-
137
-
138
- def _format_header(text: str | None, log_indent_level: int = 0) -> str:
139
- return _format(
140
- text,
141
- base_indent=2,
142
- first_indent=0,
143
- indent=0,
144
- log_indent_level=log_indent_level,
145
- )
146
-
147
-
148
- def _format_content(text: str | None, log_indent_level: int = 0) -> str:
149
- return _format(
150
- text,
151
- base_indent=2,
152
- first_indent=3,
153
- indent=3,
154
- log_indent_level=log_indent_level,
155
- )
156
-
157
-
158
- def _format_stream_content(text: str | None, log_indent_level: int = 0) -> str:
159
- return _format(
160
- text,
161
- base_indent=2,
162
- indent=3,
163
- log_indent_level=log_indent_level,
164
- is_stream=True,
165
- )
166
-
167
-
168
- def _format(
169
- text: str | None,
170
- base_indent: int = 0,
171
- first_indent: int = 0,
172
- indent: int = 0,
173
- log_indent_level: int = 0,
174
- is_stream: bool = False,
175
- ) -> str:
176
- if text is None:
177
- text = ""
178
- line_prefix = (base_indent * (log_indent_level + 1) + indent) * " "
179
- processed_text = text.replace("\n", f"\n{line_prefix}")
180
- if is_stream:
181
- return stylize_faint(processed_text)
182
- first_line_prefix = (base_indent * (log_indent_level + 1) + first_indent) * " "
183
- return stylize_faint(f"{first_line_prefix}{processed_text}")
184
-
185
-
186
- def _get_event_part_args(event: Any) -> Any:
187
- # Handle empty arguments across different providers
188
- if event.part.args == "" or event.part.args is None:
189
- return {}
190
- if isinstance(event.part.args, str):
191
- # Some providers might send "null" or "{}" as a string
192
- if event.part.args.strip() in ["null", "{}"]:
193
- return {}
194
- try:
195
- obj = json.loads(event.part.args)
196
- if isinstance(obj, dict):
197
- return _truncate_kwargs(obj)
198
- except json.JSONDecodeError:
199
- pass
200
- # Handle dummy property if present (from our schema sanitization)
201
- if isinstance(event.part.args, dict):
202
- return _truncate_kwargs(event.part.args)
203
- return event.part.args
204
-
205
-
206
- def _truncate_kwargs(kwargs: dict[str, Any]) -> dict[str, Any]:
207
- return {key: _truncate_arg(val) for key, val in kwargs.items()}
208
-
209
-
210
- def _truncate_arg(arg: str, length: int = 19) -> str:
211
- if isinstance(arg, str) and len(arg) > length:
212
- return f"{arg[:length-4]} ..."
213
- return arg
214
-
215
-
216
- def _get_event_part_content(event: Any) -> str:
217
- if not hasattr(event, "part"):
218
- return f"{event}"
219
- if not hasattr(event.part, "content"):
220
- return f"{event.part}"
221
- return getattr(event.part, "content")