fast-agent-mcp 0.3.15__py3-none-any.whl → 0.3.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp has been flagged as potentially problematic; see the advisory details on the registry page for more information.
- fast_agent/__init__.py +2 -0
- fast_agent/agents/agent_types.py +5 -0
- fast_agent/agents/llm_agent.py +7 -0
- fast_agent/agents/llm_decorator.py +6 -0
- fast_agent/agents/mcp_agent.py +134 -10
- fast_agent/cli/__main__.py +35 -0
- fast_agent/cli/commands/check_config.py +85 -0
- fast_agent/cli/commands/go.py +100 -36
- fast_agent/cli/constants.py +15 -1
- fast_agent/cli/main.py +2 -1
- fast_agent/config.py +39 -10
- fast_agent/constants.py +8 -0
- fast_agent/context.py +24 -15
- fast_agent/core/direct_decorators.py +9 -0
- fast_agent/core/fastagent.py +101 -1
- fast_agent/core/logging/listeners.py +8 -0
- fast_agent/interfaces.py +12 -0
- fast_agent/llm/fastagent_llm.py +45 -0
- fast_agent/llm/memory.py +26 -1
- fast_agent/llm/model_database.py +4 -1
- fast_agent/llm/model_factory.py +4 -2
- fast_agent/llm/model_info.py +19 -43
- fast_agent/llm/provider/anthropic/llm_anthropic.py +112 -0
- fast_agent/llm/provider/google/llm_google_native.py +238 -7
- fast_agent/llm/provider/openai/llm_openai.py +382 -19
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/resources/setup/agent.py +2 -0
- fast_agent/resources/setup/fastagent.config.yaml +6 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +208 -0
- fast_agent/tools/shell_runtime.py +404 -0
- fast_agent/ui/console_display.py +47 -996
- fast_agent/ui/elicitation_form.py +76 -24
- fast_agent/ui/elicitation_style.py +2 -2
- fast_agent/ui/enhanced_prompt.py +107 -37
- fast_agent/ui/history_display.py +20 -5
- fast_agent/ui/interactive_prompt.py +108 -3
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +103 -45
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/streaming.py +638 -0
- fast_agent/ui/tool_display.py +417 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/METADATA +8 -7
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/RECORD +47 -39
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,638 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import math
|
|
5
|
+
import time
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Protocol
|
|
7
|
+
|
|
8
|
+
from rich.console import Group
|
|
9
|
+
from rich.live import Live
|
|
10
|
+
from rich.markdown import Markdown
|
|
11
|
+
from rich.text import Text
|
|
12
|
+
|
|
13
|
+
from fast_agent.core.logging.logger import get_logger
|
|
14
|
+
from fast_agent.ui import console
|
|
15
|
+
from fast_agent.ui.markdown_helpers import prepare_markdown_content
|
|
16
|
+
from fast_agent.ui.markdown_truncator import MarkdownTruncator
|
|
17
|
+
from fast_agent.ui.plain_text_truncator import PlainTextTruncator
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
|
|
21
|
+
from fast_agent.ui.console_display import ConsoleDisplay
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
logger = get_logger(__name__)

# Tuning knobs for the live streaming display.
# *_TARGET_RATIO: fraction of the terminal height the streamed content may
# occupy before it is truncated (plain text is cheaper to re-render, so it
# gets a larger budget).
# *_REFRESH_PER_SECOND: Rich Live refresh rate; markdown re-rendering is
# expensive, so it refreshes far less often than plain text.
# *_HEIGHT_FUDGE: extra rows added to the measured content height when
# budgeting the render area, to absorb measurement error.
MARKDOWN_STREAM_TARGET_RATIO = 0.75
MARKDOWN_STREAM_REFRESH_PER_SECOND = 4
MARKDOWN_STREAM_HEIGHT_FUDGE = 1
PLAIN_STREAM_TARGET_RATIO = 0.9
PLAIN_STREAM_REFRESH_PER_SECOND = 20
PLAIN_STREAM_HEIGHT_FUDGE = 1
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class NullStreamingHandle:
|
|
35
|
+
"""No-op streaming handle used when streaming is disabled."""
|
|
36
|
+
|
|
37
|
+
def update(self, _chunk: str) -> None:
|
|
38
|
+
return
|
|
39
|
+
|
|
40
|
+
def finalize(self, _message: "PromptMessageExtended | str") -> None:
|
|
41
|
+
return
|
|
42
|
+
|
|
43
|
+
def close(self) -> None:
|
|
44
|
+
return
|
|
45
|
+
|
|
46
|
+
def handle_tool_event(self, _event_type: str, info: dict[str, Any] | None = None) -> None:
|
|
47
|
+
return
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class StreamingMessageHandle:
    """Helper that manages live rendering for streaming assistant responses.

    Chunks arrive via :meth:`update`. When created inside a running asyncio
    event loop, chunks are queued and rendered by a background worker task;
    otherwise each chunk is handled and rendered synchronously. Output is
    rendered through a transient Rich ``Live`` region, either as Markdown or
    as plain text, truncated to fit the terminal.
    """

    def __init__(
        self,
        *,
        display: "ConsoleDisplay",
        bottom_items: list[str] | None,
        highlight_index: int | None,
        max_item_length: int | None,
        use_plain_text: bool = False,
        header_left: str = "",
        header_right: str = "",
        progress_display: Any = None,
    ) -> None:
        self._display = display
        self._bottom_items = bottom_items
        self._highlight_index = highlight_index
        self._max_item_length = max_item_length
        self._use_plain_text = use_plain_text
        self._header_left = header_left
        self._header_right = header_right
        # Optional progress UI that must be paused while Live owns the screen.
        self._progress_display = progress_display
        self._progress_paused = False
        # Accumulated raw chunks; periodically collapsed to a single trimmed string.
        self._buffer: list[str] = []
        self._plain_text_style: str | None = None
        # When True, literal "\n" escape sequences in chunks are decoded to
        # real newlines (used for streamed tool arguments).
        self._convert_literal_newlines = False
        # Trailing backslashes carried over between chunks by the decoder.
        self._pending_literal_backslashes = ""
        initial_renderable = (
            Text("", style=self._plain_text_style) if self._use_plain_text else Markdown("")
        )
        refresh_rate = (
            PLAIN_STREAM_REFRESH_PER_SECOND
            if self._use_plain_text
            else MARKDOWN_STREAM_REFRESH_PER_SECOND
        )
        self._min_render_interval = 1.0 / refresh_rate if refresh_rate else None
        self._last_render_time = 0.0
        # Async mode only when constructed inside a running event loop.
        try:
            self._loop: asyncio.AbstractEventLoop | None = asyncio.get_running_loop()
        except RuntimeError:
            self._loop = None
        self._async_mode = self._loop is not None
        self._queue: asyncio.Queue[object] | None = asyncio.Queue() if self._async_mode else None
        # Unique marker object pushed on the queue to tell the worker to stop.
        self._stop_sentinel: object = object()
        self._worker_task: asyncio.Task[None] | None = None
        self._live: Live | None = Live(
            initial_renderable,
            console=console.console,
            vertical_overflow="ellipsis",
            refresh_per_second=refresh_rate,
            transient=True,
        )
        self._live_started = False
        self._active = True
        self._finalized = False
        self._in_table = False
        # Partial markdown table row buffered until a newline completes it,
        # so half-built rows are not rendered.
        self._pending_table_row = ""
        self._truncator = MarkdownTruncator(target_height_ratio=MARKDOWN_STREAM_TARGET_RATIO)
        self._plain_truncator = (
            PlainTextTruncator(target_height_ratio=PLAIN_STREAM_TARGET_RATIO)
            if self._use_plain_text
            else None
        )
        # High-water mark of the render area; grows with content to avoid the
        # Live region shrinking between frames.
        self._max_render_height = 0

        if self._async_mode and self._loop and self._queue is not None:
            self._worker_task = self._loop.create_task(self._render_worker())

    def update(self, chunk: str) -> None:
        """Accept a streamed chunk; queue it (async) or render it (sync)."""
        if not self._active or not chunk:
            return

        if self._async_mode and self._queue is not None:
            self._enqueue_chunk(chunk)
            return

        if self._handle_chunk(chunk):
            self._render_current_buffer()

    def _build_header(self) -> Text:
        """Build the one-line header: left label, dim rule, optional right tag."""
        width = console.console.size.width
        left_text = Text.from_markup(self._header_left)

        if self._header_right and self._header_right.strip():
            right_text = Text()
            right_text.append("[", style="dim")
            right_text.append_text(Text.from_markup(self._header_right))
            right_text.append("]", style="dim")
            separator_count = width - left_text.cell_len - right_text.cell_len
            if separator_count < 1:
                separator_count = 1
        else:
            right_text = Text("")
            separator_count = width - left_text.cell_len

        combined = Text()
        combined.append_text(left_text)
        combined.append(" ", style="default")
        combined.append("─" * (separator_count - 1), style="dim")
        combined.append_text(right_text)
        return combined

    def _pause_progress_display(self) -> None:
        """Pause the shared progress UI so it does not fight Live for the screen."""
        if self._progress_display and not self._progress_paused:
            try:
                self._progress_display.pause()
                self._progress_paused = True
            except Exception:
                # Best-effort: a failed pause leaves the flag cleared so we
                # do not attempt a matching resume later.
                self._progress_paused = False

    def _resume_progress_display(self) -> None:
        """Resume the progress UI if this handle previously paused it."""
        if self._progress_display and self._progress_paused:
            try:
                self._progress_display.resume()
            except Exception:
                pass
            finally:
                self._progress_paused = False

    def _ensure_started(self) -> None:
        """Lazily enter the Live context on first render."""
        if not self._live or self._live_started:
            return

        self._pause_progress_display()

        if self._live and not self._live_started:
            self._live.__enter__()
            self._live_started = True

    def _close_incomplete_code_blocks(self, text: str) -> str:
        """Append a closing fence when a ``` code block is still open.

        ``^``` `` matches every fence line while ``^```\\s*$`` matches only
        bare (closing) fences; a surplus of the former means an opener with a
        language tag has no closer yet, so one is appended for display.
        """
        import re

        opening_fences = len(re.findall(r"^```", text, re.MULTILINE))
        closing_fences = len(re.findall(r"^```\s*$", text, re.MULTILINE))

        if opening_fences > closing_fences:
            if not re.search(r"```\s*$", text):
                return text + "\n```\n"

        return text

    def _trim_to_displayable(self, text: str) -> str:
        """Truncate *text* so its rendered form fits the terminal height."""
        if not text:
            return text

        terminal_height = console.console.size.height - 1

        if self._use_plain_text and self._plain_truncator:
            terminal_width = console.console.size.width
            return self._plain_truncator.truncate(
                text,
                terminal_height=terminal_height,
                terminal_width=terminal_width,
            )

        return self._truncator.truncate(
            text,
            terminal_height=terminal_height,
            console=console.console,
            code_theme=self._display.code_style,
            prefer_recent=True,
        )

    def _switch_to_plain_text(self) -> None:
        """Switch rendering to dim plain text (used for tool-call output)."""
        if not self._use_plain_text:
            self._use_plain_text = True
            if not self._plain_truncator:
                self._plain_truncator = PlainTextTruncator(
                    target_height_ratio=PLAIN_STREAM_TARGET_RATIO
                )
        self._plain_text_style = "dim"
        self._convert_literal_newlines = True

    def finalize(self, _message: "PromptMessageExtended | str") -> None:
        """Mark the stream finished and tear down the live display (idempotent)."""
        if not self._active or self._finalized:
            return

        self._finalized = True
        self.close()

    def close(self) -> None:
        """Stop streaming: signal the worker (async) and release Live resources."""
        if not self._active:
            return

        self._active = False
        if self._async_mode:
            if self._queue and self._loop:
                try:
                    current_loop = asyncio.get_running_loop()
                except RuntimeError:
                    current_loop = None

                try:
                    # put_nowait directly when on the owning loop; otherwise
                    # marshal the sentinel over via call_soon_threadsafe.
                    if current_loop is self._loop:
                        self._queue.put_nowait(self._stop_sentinel)
                    else:
                        self._loop.call_soon_threadsafe(self._queue.put_nowait, self._stop_sentinel)
                except RuntimeError as exc:
                    logger.debug(
                        "RuntimeError while closing streaming display (expected during shutdown)",
                        data={"error": str(exc)},
                    )
                except Exception as exc:
                    logger.warning(
                        "Unexpected error while closing streaming display",
                        exc_info=True,
                        data={"error": str(exc)},
                    )
            if self._worker_task:
                self._worker_task.cancel()
                self._worker_task = None
        # Safe to call even in async mode: the worker's finally performs the
        # same (guarded, idempotent) teardown.
        self._shutdown_live_resources()
        self._max_render_height = 0

    def _extract_trailing_paragraph(self, text: str) -> str:
        """Return the last line of the final paragraph of *text*."""
        if not text:
            return ""
        double_break = text.rfind("\n\n")
        if double_break != -1:
            candidate = text[double_break + 2 :]
        else:
            candidate = text
        if "\n" in candidate:
            candidate = candidate.split("\n")[-1]
        return candidate

    def _wrap_plain_chunk(self, chunk: str) -> str:
        """Hard-wrap over-long lines in *chunk* to the terminal width."""
        width = max(1, console.console.size.width)
        if not chunk or width <= 1:
            return chunk

        result_segments: list[str] = []
        start = 0
        length = len(chunk)

        while start < length:
            newline_pos = chunk.find("\n", start)
            if newline_pos == -1:
                line = chunk[start:]
                delimiter = ""
                start = length
            else:
                line = chunk[start:newline_pos]
                delimiter = "\n"
                start = newline_pos + 1

            if len(line.expandtabs()) > width:
                wrapped = self._wrap_plain_line(line, width)
                result_segments.append("\n".join(wrapped))
            else:
                result_segments.append(line)

            result_segments.append(delimiter)

        return "".join(result_segments)

    @staticmethod
    def _wrap_plain_line(line: str, width: int) -> list[str]:
        """Split one line into segments no longer than *width*.

        Prefers breaking at a space, but falls back to a hard break when the
        last space is absent or in the first half of the window.
        """
        if not line:
            return [""]

        segments: list[str] = []
        remaining = line

        while len(remaining) > width:
            break_at = remaining.rfind(" ", 0, width)
            if break_at == -1 or break_at < width // 2:
                break_at = width
                segments.append(remaining[:break_at])
                remaining = remaining[break_at:]
            else:
                segments.append(remaining[:break_at])
                # Skip the space we broke on.
                remaining = remaining[break_at + 1 :]
        segments.append(remaining)
        return segments

    def _decode_literal_newlines(self, chunk: str) -> str:
        """Convert literal ``\\n`` escape sequences into real newlines.

        Only an odd run of backslashes followed by ``n`` is a newline escape;
        a trailing run of backslashes is held in
        ``_pending_literal_backslashes`` until the next chunk resolves it.
        """
        if not chunk:
            return chunk

        text = chunk
        if self._pending_literal_backslashes:
            text = self._pending_literal_backslashes + text
            self._pending_literal_backslashes = ""

        result: list[str] = []
        length = len(text)
        index = 0

        while index < length:
            char = text[index]
            if char == "\\":
                start = index
                while index < length and text[index] == "\\":
                    index += 1
                count = index - start

                if index >= length:
                    # Run of backslashes ends the chunk; defer the decision.
                    self._pending_literal_backslashes = "\\" * count
                    break

                next_char = text[index]
                if next_char == "n" and count % 2 == 1:
                    if count > 1:
                        result.append("\\" * (count - 1))
                    result.append("\n")
                    index += 1
                else:
                    result.append("\\" * count)
                    continue
            else:
                result.append(char)
                index += 1

        return "".join(result)

    def _estimate_plain_render_height(self, text: str) -> int:
        """Estimate rendered rows for plain text after width wrapping."""
        if not text:
            return 0

        width = max(1, console.console.size.width)
        lines = text.split("\n")
        total = 0
        for line in lines:
            expanded_len = len(line.expandtabs())
            total += max(1, math.ceil(expanded_len / width)) if expanded_len else 1
        return total

    def _enqueue_chunk(self, chunk: str) -> None:
        """Hand *chunk* to the render worker, thread-safely if off-loop."""
        if not self._queue or not self._loop:
            return

        try:
            current_loop = asyncio.get_running_loop()
        except RuntimeError:
            current_loop = None

        if current_loop is self._loop:
            try:
                self._queue.put_nowait(chunk)
            except asyncio.QueueFull:
                # Unbounded queue in practice; drop silently if it ever fills.
                pass
        else:
            try:
                self._loop.call_soon_threadsafe(self._queue.put_nowait, chunk)
            except RuntimeError as exc:
                logger.debug(
                    "RuntimeError while enqueuing chunk (expected during shutdown)",
                    data={"error": str(exc), "chunk_length": len(chunk)},
                )
            except Exception as exc:
                logger.warning(
                    "Unexpected error while enqueuing chunk",
                    exc_info=True,
                    data={"error": str(exc), "chunk_length": len(chunk)},
                )

    def _handle_chunk(self, chunk: str) -> bool:
        """Fold *chunk* into the buffer; return True when a re-render is due.

        In markdown mode, a chunk that extends a table row without a newline
        is held back (``_pending_table_row``) so partial rows never render.
        """
        if not chunk:
            return False

        if self._use_plain_text:
            if self._convert_literal_newlines:
                chunk = self._decode_literal_newlines(chunk)
                if not chunk:
                    # Decoder consumed everything (trailing backslashes);
                    # flush any held table row and wait for more input.
                    if self._pending_table_row:
                        self._buffer.append(self._pending_table_row)
                        self._pending_table_row = ""
                    return False
            chunk = self._wrap_plain_chunk(chunk)
            if self._pending_table_row:
                self._buffer.append(self._pending_table_row)
                self._pending_table_row = ""
        else:
            text_so_far = "".join(self._buffer)
            lines = text_so_far.strip().split("\n")
            last_line = lines[-1] if lines else ""
            currently_in_table = last_line.strip().startswith("|")

            if currently_in_table and "\n" not in chunk:
                self._pending_table_row += chunk
                return False

            if self._pending_table_row:
                self._buffer.append(self._pending_table_row)
                self._pending_table_row = ""

        self._buffer.append(chunk)
        return True

    def _render_current_buffer(self) -> None:
        """Render the buffered text into the Live region, trimmed to fit."""
        if not self._buffer:
            return

        self._ensure_started()

        if not self._live:
            return

        text = "".join(self._buffer)

        if self._use_plain_text:
            trimmed = self._trim_to_displayable(text)
            if trimmed != text:
                text = trimmed
                self._buffer = [trimmed]
        # A single very long trailing paragraph can overflow the budget even
        # when total line count looks fine; estimate and pre-trim if so.
        trailing_paragraph = self._extract_trailing_paragraph(text)
        if trailing_paragraph and "\n" not in trailing_paragraph:
            width = max(1, console.console.size.width)
            target_ratio = (
                PLAIN_STREAM_TARGET_RATIO if self._use_plain_text else MARKDOWN_STREAM_TARGET_RATIO
            )
            target_rows = max(1, int(console.console.size.height * target_ratio) - 1)
            estimated_rows = math.ceil(len(trailing_paragraph.expandtabs()) / width)
            if estimated_rows > target_rows:
                trimmed_text = self._trim_to_displayable(text)
                if trimmed_text != text:
                    text = trimmed_text
                    self._buffer = [trimmed_text]

        # Collapse the buffer periodically so joins stay cheap.
        if len(self._buffer) > 10:
            text = self._trim_to_displayable(text)
            self._buffer = [text]

        header = self._build_header()
        max_allowed_height = max(1, console.console.size.height - 2)
        self._max_render_height = min(self._max_render_height, max_allowed_height)

        if self._use_plain_text:
            content_height = self._estimate_plain_render_height(text)
            budget_height = min(content_height + PLAIN_STREAM_HEIGHT_FUDGE, max_allowed_height)

            if budget_height > self._max_render_height:
                self._max_render_height = budget_height

            # Pad to the high-water mark so the Live region never shrinks.
            padding_lines = max(0, self._max_render_height - content_height)
            display_text = text + ("\n" * padding_lines if padding_lines else "")
            content = (
                Text(display_text, style=self._plain_text_style)
                if self._plain_text_style
                else Text(display_text)
            )
        else:
            prepared = prepare_markdown_content(text, self._display._escape_xml)
            prepared_for_display = self._close_incomplete_code_blocks(prepared)

            content_height = self._truncator.measure_rendered_height(
                prepared_for_display, console.console, self._display.code_style
            )
            budget_height = min(content_height + MARKDOWN_STREAM_HEIGHT_FUDGE, max_allowed_height)

            if budget_height > self._max_render_height:
                self._max_render_height = budget_height

            padding_lines = max(0, self._max_render_height - content_height)
            if padding_lines:
                prepared_for_display = prepared_for_display + ("\n" * padding_lines)

            content = Markdown(prepared_for_display, code_theme=self._display.code_style)

        header_with_spacing = header.copy()
        header_with_spacing.append("\n", style="default")

        combined = Group(header_with_spacing, content)
        try:
            self._live.update(combined)
            self._last_render_time = time.monotonic()
        except Exception:
            # Rendering is best-effort; never let display errors kill the stream.
            pass

    async def _render_worker(self) -> None:
        """Drain queued chunks, render in batches, and throttle refreshes."""
        assert self._queue is not None
        try:
            while True:
                try:
                    item = await self._queue.get()
                except asyncio.CancelledError:
                    break

                if item is self._stop_sentinel:
                    break

                # Batch everything already queued into one render pass.
                stop_requested = False
                chunks = [item]
                while True:
                    try:
                        next_item = self._queue.get_nowait()
                    except asyncio.QueueEmpty:
                        break
                    if next_item is self._stop_sentinel:
                        stop_requested = True
                        break
                    chunks.append(next_item)

                should_render = False
                for chunk in chunks:
                    if isinstance(chunk, str):
                        should_render = self._handle_chunk(chunk) or should_render

                if should_render:
                    self._render_current_buffer()
                    if self._min_render_interval:
                        try:
                            # Throttle to the configured refresh rate.
                            await asyncio.sleep(self._min_render_interval)
                        except asyncio.CancelledError:
                            break

                if stop_requested:
                    break
        except asyncio.CancelledError:
            pass
        finally:
            self._shutdown_live_resources()

    def _shutdown_live_resources(self) -> None:
        """Exit the Live context, resume progress UI, and deactivate (idempotent)."""
        if self._live and self._live_started:
            try:
                self._live.__exit__(None, None, None)
            except Exception:
                pass
        self._live = None
        self._live_started = False

        self._resume_progress_display()
        self._active = False

    def handle_tool_event(self, event_type: str, info: dict[str, Any] | None = None) -> None:
        """React to tool lifecycle events ("start"/"delta"/"text"/"stop").

        Switches to plain-text rendering for tool output; streams argument
        deltas inline when the tool streams its arguments. All errors are
        logged and swallowed so display issues never break the agent loop.
        """
        try:
            if not self._active:
                return

            streams_arguments = info.get("streams_arguments", False) if info else False
            tool_name = info.get("tool_name", "unknown") if info else "unknown"

            if event_type == "start":
                if streams_arguments:
                    self._switch_to_plain_text()
                    self.update(f"\n→ Calling {tool_name}\n")
                else:
                    self._pause_progress_display()
                    self._switch_to_plain_text()
                    self.update(f"\n→ Calling {tool_name}\n")
                return
            if event_type == "delta":
                if streams_arguments and info and "chunk" in info:
                    self.update(info["chunk"])
            elif event_type == "text":
                self._pause_progress_display()
            elif event_type == "stop":
                if streams_arguments:
                    self.update("\n")
                    self.close()
                else:
                    self.update("\n")
                    self.close()
        except Exception as exc:
            logger.warning(
                "Error handling tool event",
                exc_info=True,
                data={
                    "event_type": event_type,
                    "streams_arguments": info.get("streams_arguments") if info else None,
                    "error": str(exc),
                },
            )
|
|
616
|
+
|
|
617
|
+
|
|
618
|
+
# Public API of this module. The tuning constants are exported so other UI
# components can align their refresh/truncation behaviour with streaming.
# NOTE: StreamingHandle is defined below this statement; __all__ only affects
# star-imports, so the forward listing is harmless.
__all__ = [
    "NullStreamingHandle",
    "StreamingMessageHandle",
    "StreamingHandle",
    "MARKDOWN_STREAM_TARGET_RATIO",
    "MARKDOWN_STREAM_REFRESH_PER_SECOND",
    "MARKDOWN_STREAM_HEIGHT_FUDGE",
    "PLAIN_STREAM_TARGET_RATIO",
    "PLAIN_STREAM_REFRESH_PER_SECOND",
    "PLAIN_STREAM_HEIGHT_FUDGE",
]
|
|
629
|
+
|
|
630
|
+
|
|
631
|
+
class StreamingHandle(Protocol):
    """Structural interface for streaming display handles.

    Both ``StreamingMessageHandle`` and ``NullStreamingHandle`` satisfy this
    protocol, so producers of streamed output can accept either one.
    """

    # Feed one streamed text chunk to the display.
    def update(self, chunk: str) -> None: ...

    # Called once with the completed message when streaming ends.
    def finalize(self, message: "PromptMessageExtended | str") -> None: ...

    # Release any live-rendering resources (idempotent).
    def close(self) -> None: ...

    # React to tool lifecycle events ("start"/"delta"/"text"/"stop").
    def handle_tool_event(self, event_type: str, info: dict[str, Any] | None = None) -> None: ...
|