fast-agent-mcp 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,774 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import math
|
|
5
|
+
import time
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Protocol
|
|
7
|
+
|
|
8
|
+
from rich.console import Group
|
|
9
|
+
from rich.live import Live
|
|
10
|
+
from rich.markdown import Markdown
|
|
11
|
+
from rich.text import Text
|
|
12
|
+
|
|
13
|
+
from fast_agent.core.logging.logger import get_logger
|
|
14
|
+
from fast_agent.llm.stream_types import StreamChunk
|
|
15
|
+
from fast_agent.ui import console
|
|
16
|
+
from fast_agent.ui.markdown_helpers import prepare_markdown_content
|
|
17
|
+
from fast_agent.ui.markdown_truncator import MarkdownTruncator
|
|
18
|
+
from fast_agent.ui.plain_text_truncator import PlainTextTruncator
|
|
19
|
+
from fast_agent.utils.reasoning_stream_parser import ReasoningSegment, ReasoningStreamParser
|
|
20
|
+
|
|
21
|
+
if TYPE_CHECKING:
|
|
22
|
+
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended
|
|
23
|
+
from fast_agent.ui.console_display import ConsoleDisplay
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
logger = get_logger(__name__)

# Tuning for Markdown live streaming: fraction of terminal height the live
# region may occupy, and how often Rich refreshes it.  Markdown refreshes
# slowly because each frame re-parses the buffer.
MARKDOWN_STREAM_TARGET_RATIO = 0.75
MARKDOWN_STREAM_REFRESH_PER_SECOND = 4
# Extra row added to height estimates (consumer not visible in this chunk).
MARKDOWN_STREAM_HEIGHT_FUDGE = 1
# Plain text can use more of the screen and refresh faster — no re-parse.
PLAIN_STREAM_TARGET_RATIO = 0.9
PLAIN_STREAM_REFRESH_PER_SECOND = 20
PLAIN_STREAM_HEIGHT_FUDGE = 1
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class NullStreamingHandle:
    """No-op streaming handle used when streaming is disabled.

    Mirrors the surface of ``StreamingMessageHandle`` so callers can treat
    "streaming off" uniformly; every method accepts and discards its input.
    """

    def update(self, _chunk: str) -> None:
        """Discard a raw text chunk."""

    def update_chunk(self, _chunk: StreamChunk) -> None:
        """Discard a structured stream chunk."""

    def finalize(self, _message: "PromptMessageExtended | str") -> None:
        """Discard the final message; nothing to flush."""

    def close(self) -> None:
        """Nothing to release."""

    def handle_tool_event(self, _event_type: str, info: dict[str, Any] | None = None) -> None:
        """Discard tool lifecycle events."""
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class StreamingMessageHandle:
|
|
56
|
+
"""Helper that manages live rendering for streaming assistant responses."""
|
|
57
|
+
|
|
58
|
+
    def __init__(
        self,
        *,
        display: "ConsoleDisplay",
        bottom_items: list[str] | None,
        highlight_index: int | None,
        max_item_length: int | None,
        use_plain_text: bool = False,
        header_left: str = "",
        header_right: str = "",
        progress_display: Any = None,
    ) -> None:
        """Set up live rendering state for one streaming assistant response.

        Args:
            display: Owning console display (supplies the code theme).
            bottom_items: Items shown below the stream (consumer not visible
                in this chunk).
            highlight_index: Index into ``bottom_items`` to highlight.
            max_item_length: Truncation length for bottom items.
            use_plain_text: Render as plain ``Text`` instead of ``Markdown``.
            header_left: Rich markup for the header's left side.
            header_right: Rich markup for the bracketed right tag.
            progress_display: Optional progress UI to pause while live.
        """
        self._display = display
        self._bottom_items = bottom_items
        self._highlight_index = highlight_index
        self._max_item_length = max_item_length
        self._use_plain_text = use_plain_text
        self._header_left = header_left
        self._header_right = header_right
        self._progress_display = progress_display
        self._progress_paused = False
        # Accumulated chunk text; joined for each render.
        self._buffer: list[str] = []
        self._plain_text_style: str | None = None
        # When True, literal "\n" sequences in chunks are decoded to newlines.
        self._convert_literal_newlines = False
        # Backslash run carried across chunk boundaries by the decoder.
        self._pending_literal_backslashes = ""
        initial_renderable = (
            Text("", style=self._plain_text_style) if self._use_plain_text else Markdown("")
        )
        # Plain text refreshes faster — no Markdown re-parse per frame.
        refresh_rate = (
            PLAIN_STREAM_REFRESH_PER_SECOND
            if self._use_plain_text
            else MARKDOWN_STREAM_REFRESH_PER_SECOND
        )
        self._min_render_interval = 1.0 / refresh_rate if refresh_rate else None
        self._last_render_time = 0.0
        # Async mode: a worker task drains a queue; sync mode renders inline.
        try:
            self._loop: asyncio.AbstractEventLoop | None = asyncio.get_running_loop()
        except RuntimeError:
            self._loop = None
        self._async_mode = self._loop is not None
        self._queue: asyncio.Queue[object] | None = asyncio.Queue() if self._async_mode else None
        self._stop_sentinel: object = object()
        self._worker_task: asyncio.Task[None] | None = None
        self._live: Live | None = Live(
            initial_renderable,
            console=console.console,
            vertical_overflow="ellipsis",
            refresh_per_second=refresh_rate,
            transient=True,
        )
        self._live_started = False
        self._active = True
        self._finalized = False
        self._in_table = False
        # Partial Markdown table row held back until a newline completes it.
        self._pending_table_row = ""
        self._truncator = MarkdownTruncator(target_height_ratio=MARKDOWN_STREAM_TARGET_RATIO)
        self._plain_truncator = (
            PlainTextTruncator(target_height_ratio=PLAIN_STREAM_TARGET_RATIO)
            if self._use_plain_text
            else None
        )
        self._max_render_height = 0
        self._reasoning_parser = ReasoningStreamParser()
        # Parallel buffer of (text, is_thinking) spans for styled rendering.
        self._styled_buffer: list[tuple[str, bool]] = []
        self._has_reasoning = False

        if self._async_mode and self._loop and self._queue is not None:
            self._worker_task = self._loop.create_task(self._render_worker())
|
|
126
|
+
|
|
127
|
+
def update(self, chunk: str) -> None:
|
|
128
|
+
if not self._active or not chunk:
|
|
129
|
+
return
|
|
130
|
+
|
|
131
|
+
if self._async_mode and self._queue is not None:
|
|
132
|
+
self._enqueue_chunk(chunk)
|
|
133
|
+
return
|
|
134
|
+
|
|
135
|
+
if self._handle_chunk(chunk):
|
|
136
|
+
self._render_current_buffer()
|
|
137
|
+
|
|
138
|
+
def update_chunk(self, chunk: StreamChunk) -> None:
|
|
139
|
+
"""Structured streaming update with an explicit reasoning flag."""
|
|
140
|
+
if not self._active or not chunk or not chunk.text:
|
|
141
|
+
return
|
|
142
|
+
|
|
143
|
+
if self._async_mode and self._queue is not None:
|
|
144
|
+
self._enqueue_chunk(chunk)
|
|
145
|
+
return
|
|
146
|
+
|
|
147
|
+
if self._handle_stream_chunk(chunk):
|
|
148
|
+
self._render_current_buffer()
|
|
149
|
+
|
|
150
|
+
def _build_header(self) -> Text:
|
|
151
|
+
width = console.console.size.width
|
|
152
|
+
left_text = Text.from_markup(self._header_left)
|
|
153
|
+
|
|
154
|
+
if self._header_right and self._header_right.strip():
|
|
155
|
+
right_text = Text()
|
|
156
|
+
right_text.append("[", style="dim")
|
|
157
|
+
right_text.append_text(Text.from_markup(self._header_right))
|
|
158
|
+
right_text.append("]", style="dim")
|
|
159
|
+
separator_count = width - left_text.cell_len - right_text.cell_len
|
|
160
|
+
if separator_count < 1:
|
|
161
|
+
separator_count = 1
|
|
162
|
+
else:
|
|
163
|
+
right_text = Text("")
|
|
164
|
+
separator_count = width - left_text.cell_len
|
|
165
|
+
|
|
166
|
+
combined = Text()
|
|
167
|
+
combined.append_text(left_text)
|
|
168
|
+
combined.append(" ", style="default")
|
|
169
|
+
combined.append("─" * (separator_count - 1), style="dim")
|
|
170
|
+
combined.append_text(right_text)
|
|
171
|
+
return combined
|
|
172
|
+
|
|
173
|
+
def _pause_progress_display(self) -> None:
|
|
174
|
+
if self._progress_display and not self._progress_paused:
|
|
175
|
+
try:
|
|
176
|
+
self._progress_display.pause()
|
|
177
|
+
self._progress_paused = True
|
|
178
|
+
except Exception:
|
|
179
|
+
self._progress_paused = False
|
|
180
|
+
|
|
181
|
+
def _resume_progress_display(self) -> None:
|
|
182
|
+
if self._progress_display and self._progress_paused:
|
|
183
|
+
try:
|
|
184
|
+
self._progress_display.resume()
|
|
185
|
+
except Exception:
|
|
186
|
+
pass
|
|
187
|
+
finally:
|
|
188
|
+
self._progress_paused = False
|
|
189
|
+
|
|
190
|
+
def _ensure_started(self) -> None:
|
|
191
|
+
if not self._live or self._live_started:
|
|
192
|
+
return
|
|
193
|
+
|
|
194
|
+
self._pause_progress_display()
|
|
195
|
+
|
|
196
|
+
if self._live and not self._live_started:
|
|
197
|
+
self._live.__enter__()
|
|
198
|
+
self._live_started = True
|
|
199
|
+
|
|
200
|
+
def _close_incomplete_code_blocks(self, text: str) -> str:
|
|
201
|
+
import re
|
|
202
|
+
|
|
203
|
+
opening_fences = len(re.findall(r"^```", text, re.MULTILINE))
|
|
204
|
+
closing_fences = len(re.findall(r"^```\s*$", text, re.MULTILINE))
|
|
205
|
+
|
|
206
|
+
if opening_fences > closing_fences:
|
|
207
|
+
if not re.search(r"```\s*$", text):
|
|
208
|
+
return text + "\n```\n"
|
|
209
|
+
|
|
210
|
+
return text
|
|
211
|
+
|
|
212
|
+
def _trim_to_displayable(self, text: str) -> str:
|
|
213
|
+
if not text:
|
|
214
|
+
return text
|
|
215
|
+
|
|
216
|
+
terminal_height = console.console.size.height - 1
|
|
217
|
+
|
|
218
|
+
if self._use_plain_text and self._plain_truncator:
|
|
219
|
+
terminal_width = console.console.size.width
|
|
220
|
+
return self._plain_truncator.truncate(
|
|
221
|
+
text,
|
|
222
|
+
terminal_height=terminal_height,
|
|
223
|
+
terminal_width=terminal_width,
|
|
224
|
+
)
|
|
225
|
+
|
|
226
|
+
return self._truncator.truncate(
|
|
227
|
+
text,
|
|
228
|
+
terminal_height=terminal_height,
|
|
229
|
+
console=console.console,
|
|
230
|
+
code_theme=self._display.code_style,
|
|
231
|
+
prefer_recent=True,
|
|
232
|
+
)
|
|
233
|
+
|
|
234
|
+
def _switch_to_plain_text(self, style: str | None = "dim") -> None:
|
|
235
|
+
if not self._use_plain_text:
|
|
236
|
+
self._use_plain_text = True
|
|
237
|
+
if not self._plain_truncator:
|
|
238
|
+
self._plain_truncator = PlainTextTruncator(
|
|
239
|
+
target_height_ratio=PLAIN_STREAM_TARGET_RATIO
|
|
240
|
+
)
|
|
241
|
+
self._plain_text_style = style
|
|
242
|
+
self._convert_literal_newlines = True
|
|
243
|
+
|
|
244
|
+
def finalize(self, _message: "PromptMessageExtended | str") -> None:
|
|
245
|
+
if not self._active or self._finalized:
|
|
246
|
+
return
|
|
247
|
+
|
|
248
|
+
# Flush any buffered reasoning content before closing the live view
|
|
249
|
+
self._process_reasoning_chunk("")
|
|
250
|
+
if self._buffer:
|
|
251
|
+
self._render_current_buffer()
|
|
252
|
+
|
|
253
|
+
self._finalized = True
|
|
254
|
+
self.close()
|
|
255
|
+
|
|
256
|
+
    def close(self) -> None:
        """Stop streaming: signal the worker, cancel it, and tear down Live.

        Safe to call more than once, and from a thread other than the one
        owning the event loop; shutdown-time RuntimeErrors are expected and
        logged at debug level only.
        """
        if not self._active:
            return

        self._active = False
        if self._async_mode:
            if self._queue and self._loop:
                try:
                    current_loop = asyncio.get_running_loop()
                except RuntimeError:
                    # No loop running in this thread.
                    current_loop = None

                try:
                    if current_loop is self._loop:
                        # Same loop: enqueue the stop sentinel directly.
                        self._queue.put_nowait(self._stop_sentinel)
                    else:
                        # Foreign thread/loop: marshal onto the owning loop.
                        self._loop.call_soon_threadsafe(self._queue.put_nowait, self._stop_sentinel)
                except RuntimeError as exc:
                    logger.debug(
                        "RuntimeError while closing streaming display (expected during shutdown)",
                        data={"error": str(exc)},
                    )
                except Exception as exc:
                    logger.warning(
                        "Unexpected error while closing streaming display",
                        exc_info=True,
                        data={"error": str(exc)},
                    )
            if self._worker_task:
                # Cancel rather than await: close() may run from sync code.
                self._worker_task.cancel()
                self._worker_task = None
        self._shutdown_live_resources()
        self._max_render_height = 0
|
|
289
|
+
|
|
290
|
+
def _extract_trailing_paragraph(self, text: str) -> str:
|
|
291
|
+
if not text:
|
|
292
|
+
return ""
|
|
293
|
+
double_break = text.rfind("\n\n")
|
|
294
|
+
if double_break != -1:
|
|
295
|
+
candidate = text[double_break + 2 :]
|
|
296
|
+
else:
|
|
297
|
+
candidate = text
|
|
298
|
+
if "\n" in candidate:
|
|
299
|
+
candidate = candidate.split("\n")[-1]
|
|
300
|
+
return candidate
|
|
301
|
+
|
|
302
|
+
def _wrap_plain_chunk(self, chunk: str) -> str:
|
|
303
|
+
width = max(1, console.console.size.width)
|
|
304
|
+
if not chunk or width <= 1:
|
|
305
|
+
return chunk
|
|
306
|
+
|
|
307
|
+
result_segments: list[str] = []
|
|
308
|
+
start = 0
|
|
309
|
+
length = len(chunk)
|
|
310
|
+
|
|
311
|
+
while start < length:
|
|
312
|
+
newline_pos = chunk.find("\n", start)
|
|
313
|
+
if newline_pos == -1:
|
|
314
|
+
line = chunk[start:]
|
|
315
|
+
delimiter = ""
|
|
316
|
+
start = length
|
|
317
|
+
else:
|
|
318
|
+
line = chunk[start:newline_pos]
|
|
319
|
+
delimiter = "\n"
|
|
320
|
+
start = newline_pos + 1
|
|
321
|
+
|
|
322
|
+
if len(line.expandtabs()) > width:
|
|
323
|
+
wrapped = self._wrap_plain_line(line, width)
|
|
324
|
+
result_segments.append("\n".join(wrapped))
|
|
325
|
+
else:
|
|
326
|
+
result_segments.append(line)
|
|
327
|
+
|
|
328
|
+
result_segments.append(delimiter)
|
|
329
|
+
|
|
330
|
+
return "".join(result_segments)
|
|
331
|
+
|
|
332
|
+
@staticmethod
|
|
333
|
+
def _wrap_plain_line(line: str, width: int) -> list[str]:
|
|
334
|
+
if not line:
|
|
335
|
+
return [""]
|
|
336
|
+
|
|
337
|
+
segments: list[str] = []
|
|
338
|
+
remaining = line
|
|
339
|
+
|
|
340
|
+
while len(remaining) > width:
|
|
341
|
+
break_at = remaining.rfind(" ", 0, width)
|
|
342
|
+
if break_at == -1 or break_at < width // 2:
|
|
343
|
+
break_at = width
|
|
344
|
+
segments.append(remaining[:break_at])
|
|
345
|
+
remaining = remaining[break_at:]
|
|
346
|
+
else:
|
|
347
|
+
segments.append(remaining[:break_at])
|
|
348
|
+
remaining = remaining[break_at + 1 :]
|
|
349
|
+
segments.append(remaining)
|
|
350
|
+
return segments
|
|
351
|
+
|
|
352
|
+
def _decode_literal_newlines(self, chunk: str) -> str:
|
|
353
|
+
if not chunk:
|
|
354
|
+
return chunk
|
|
355
|
+
|
|
356
|
+
text = chunk
|
|
357
|
+
if self._pending_literal_backslashes:
|
|
358
|
+
text = self._pending_literal_backslashes + text
|
|
359
|
+
self._pending_literal_backslashes = ""
|
|
360
|
+
|
|
361
|
+
result: list[str] = []
|
|
362
|
+
length = len(text)
|
|
363
|
+
index = 0
|
|
364
|
+
|
|
365
|
+
while index < length:
|
|
366
|
+
char = text[index]
|
|
367
|
+
if char == "\\":
|
|
368
|
+
start = index
|
|
369
|
+
while index < length and text[index] == "\\":
|
|
370
|
+
index += 1
|
|
371
|
+
count = index - start
|
|
372
|
+
|
|
373
|
+
if index >= length:
|
|
374
|
+
self._pending_literal_backslashes = "\\" * count
|
|
375
|
+
break
|
|
376
|
+
|
|
377
|
+
next_char = text[index]
|
|
378
|
+
if next_char == "n" and count % 2 == 1:
|
|
379
|
+
if count > 1:
|
|
380
|
+
result.append("\\" * (count - 1))
|
|
381
|
+
result.append("\n")
|
|
382
|
+
index += 1
|
|
383
|
+
else:
|
|
384
|
+
result.append("\\" * count)
|
|
385
|
+
continue
|
|
386
|
+
else:
|
|
387
|
+
result.append(char)
|
|
388
|
+
index += 1
|
|
389
|
+
|
|
390
|
+
return "".join(result)
|
|
391
|
+
|
|
392
|
+
def _estimate_plain_render_height(self, text: str) -> int:
|
|
393
|
+
if not text:
|
|
394
|
+
return 0
|
|
395
|
+
|
|
396
|
+
width = max(1, console.console.size.width)
|
|
397
|
+
lines = text.split("\n")
|
|
398
|
+
total = 0
|
|
399
|
+
for line in lines:
|
|
400
|
+
expanded_len = len(line.expandtabs())
|
|
401
|
+
total += max(1, math.ceil(expanded_len / width)) if expanded_len else 1
|
|
402
|
+
return total
|
|
403
|
+
|
|
404
|
+
    def _enqueue_chunk(self, chunk: object) -> None:
        """Hand *chunk* to the render worker's queue, loop-affinity aware.

        Called from the owning event loop, the chunk is enqueued directly;
        from any other thread/loop it is marshalled via
        ``call_soon_threadsafe``. Failures during loop shutdown are logged
        and swallowed so streaming never crashes the caller.
        """
        if not self._queue or not self._loop:
            return

        try:
            current_loop = asyncio.get_running_loop()
        except RuntimeError:
            # No running loop in this thread (e.g. called from sync code).
            current_loop = None

        if current_loop is self._loop:
            try:
                self._queue.put_nowait(chunk)
            except asyncio.QueueFull:
                # Queue is unbounded by default; drop silently if a bound
                # is ever introduced rather than stall the stream.
                pass
        else:
            try:
                self._loop.call_soon_threadsafe(self._queue.put_nowait, chunk)
            except RuntimeError as exc:
                # The owning loop is already closed — expected at shutdown.
                logger.debug(
                    "RuntimeError while enqueuing chunk (expected during shutdown)",
                    data={"error": str(exc), "chunk_repr": repr(chunk)},
                )
            except Exception as exc:
                logger.warning(
                    "Unexpected error while enqueuing chunk",
                    exc_info=True,
                    data={"error": str(exc), "chunk_repr": repr(chunk)},
                )
|
|
432
|
+
|
|
433
|
+
    def _process_reasoning_chunk(self, chunk: str) -> bool:
        """
        Detect and style reasoning-tagged content (<think>...</think>) when present.

        An empty *chunk* acts as a flush request (used by finalize()) when the
        parser is still inside a think block.

        Returns True if the chunk was handled by reasoning-aware processing.
        """
        # Engage only when inside a think block, a tag is visible, or
        # reasoning already appeared earlier in this stream.
        should_process = (
            self._reasoning_parser.in_think or "<think>" in chunk or "</think>" in chunk
        )
        if not should_process and not self._has_reasoning:
            return False

        # Reasoning output is rendered as styled plain text, not Markdown.
        self._switch_to_plain_text(style=None)
        segments: list[ReasoningSegment] = []
        if chunk:
            segments = self._reasoning_parser.feed(chunk)
        elif self._reasoning_parser.in_think:
            segments = self._reasoning_parser.flush()

        if not segments:
            return False

        self._has_reasoning = True

        for segment in segments:
            processed = segment.text
            if self._convert_literal_newlines:
                processed = self._decode_literal_newlines(processed)
            if not processed:
                continue
            processed = self._wrap_plain_chunk(processed)
            # Flush any deferred table row before appending new text.
            if self._pending_table_row:
                self._buffer.append(self._pending_table_row)
                self._pending_table_row = ""
            self._buffer.append(processed)
            # Track thinking/plain spans so rendering can style them apart.
            self._styled_buffer.append((processed, segment.is_thinking))

        return True
|
|
471
|
+
|
|
472
|
+
    def _handle_stream_chunk(self, chunk: StreamChunk) -> bool:
        """Process a typed stream chunk with explicit reasoning flag.

        Returns True when text was appended to the buffer (i.e. a re-render
        is warranted).
        """
        if not chunk.text:
            return False

        # Typed chunks always render as plain text; styling is tracked per
        # span in _styled_buffer rather than via a single style.
        self._switch_to_plain_text(style=None)

        processed = chunk.text
        if self._convert_literal_newlines:
            processed = self._decode_literal_newlines(processed)
        if not processed:
            # Entire chunk was stashed (e.g. trailing backslash run).
            return False
        processed = self._wrap_plain_chunk(processed)
        # Flush any deferred table row before appending new text.
        if self._pending_table_row:
            self._buffer.append(self._pending_table_row)
            self._pending_table_row = ""
        self._buffer.append(processed)
        self._styled_buffer.append((processed, chunk.is_reasoning))
        if chunk.is_reasoning:
            self._has_reasoning = True
        return True
|
|
493
|
+
|
|
494
|
+
def _handle_chunk(self, chunk: str) -> bool:
|
|
495
|
+
if not chunk:
|
|
496
|
+
return False
|
|
497
|
+
|
|
498
|
+
if self._process_reasoning_chunk(chunk):
|
|
499
|
+
return True
|
|
500
|
+
|
|
501
|
+
if self._use_plain_text:
|
|
502
|
+
if self._convert_literal_newlines:
|
|
503
|
+
chunk = self._decode_literal_newlines(chunk)
|
|
504
|
+
if not chunk:
|
|
505
|
+
if self._pending_table_row:
|
|
506
|
+
self._buffer.append(self._pending_table_row)
|
|
507
|
+
self._pending_table_row = ""
|
|
508
|
+
return False
|
|
509
|
+
chunk = self._wrap_plain_chunk(chunk)
|
|
510
|
+
if self._pending_table_row:
|
|
511
|
+
self._buffer.append(self._pending_table_row)
|
|
512
|
+
self._pending_table_row = ""
|
|
513
|
+
else:
|
|
514
|
+
text_so_far = "".join(self._buffer)
|
|
515
|
+
lines = text_so_far.strip().split("\n")
|
|
516
|
+
last_line = lines[-1] if lines else ""
|
|
517
|
+
currently_in_table = last_line.strip().startswith("|")
|
|
518
|
+
|
|
519
|
+
if currently_in_table and "\n" not in chunk:
|
|
520
|
+
self._pending_table_row += chunk
|
|
521
|
+
return False
|
|
522
|
+
|
|
523
|
+
if self._pending_table_row:
|
|
524
|
+
self._buffer.append(self._pending_table_row)
|
|
525
|
+
self._pending_table_row = ""
|
|
526
|
+
|
|
527
|
+
self._buffer.append(chunk)
|
|
528
|
+
return True
|
|
529
|
+
|
|
530
|
+
def _slice_styled_segments(self, target_text: str) -> list[tuple[str, bool]]:
|
|
531
|
+
"""Trim styled buffer to the tail matching the provided text length."""
|
|
532
|
+
if not self._styled_buffer:
|
|
533
|
+
return []
|
|
534
|
+
|
|
535
|
+
remaining = len(target_text)
|
|
536
|
+
selected: list[tuple[str, bool]] = []
|
|
537
|
+
|
|
538
|
+
for text, is_thinking in reversed(self._styled_buffer):
|
|
539
|
+
if remaining <= 0:
|
|
540
|
+
break
|
|
541
|
+
if len(text) <= remaining:
|
|
542
|
+
selected.append((text, is_thinking))
|
|
543
|
+
remaining -= len(text)
|
|
544
|
+
else:
|
|
545
|
+
selected.append((text[-remaining:], is_thinking))
|
|
546
|
+
remaining = 0
|
|
547
|
+
|
|
548
|
+
selected.reverse()
|
|
549
|
+
return selected
|
|
550
|
+
|
|
551
|
+
def _build_styled_text(self, text: str) -> Text:
    """Build a Rich ``Text`` where reasoning segments render dim/italic.

    When no reasoning has been seen (or the styled buffer is empty) the
    whole text is emitted in the plain-text style, if one is configured.
    As a side effect, the styled buffer is trimmed to match *text*.
    """
    if not (self._has_reasoning and self._styled_buffer):
        if self._plain_text_style:
            return Text(text, style=self._plain_text_style)
        return Text(text)

    pieces = self._slice_styled_segments(text)
    self._styled_buffer = pieces

    result = Text()
    for piece, thinking in pieces:
        result.append(piece, style="dim italic" if thinking else self._plain_text_style)
    return result
|
|
564
|
+
|
|
565
|
+
def _render_current_buffer(self) -> None:
    """Render the accumulated buffer into the live display region.

    Consolidates/trims the buffer to what fits on screen, builds either a
    plain styled Text or a Markdown renderable (depending on
    ``_use_plain_text``), pads it to a stable height to reduce flicker,
    and pushes header + content into the Rich Live instance.
    """
    if not self._buffer:
        return

    self._ensure_started()

    # _ensure_started may fail to create a Live; nothing to render then.
    if not self._live:
        return

    # Consolidate buffer if it gets fragmented (>10 items)
    # Then check if we need to truncate to keep only recent content
    if len(self._buffer) > 10:
        text = "".join(self._buffer)
        trimmed = self._trim_to_displayable(text)
        # Only update buffer if truncation actually reduced content
        # This keeps buffer size manageable for continuous scrolling
        if len(trimmed) < len(text):
            self._buffer = [trimmed]
            # Keep the styled (reasoning) segments aligned with the trimmed text.
            if self._has_reasoning:
                self._styled_buffer = self._slice_styled_segments(trimmed)
        else:
            # No truncation needed: just collapse fragments into one string.
            self._buffer = [text]

    text = "".join(self._buffer)

    # Check if trailing paragraph is too long and needs trimming
    trailing_paragraph = self._extract_trailing_paragraph(text)
    if trailing_paragraph and "\n" not in trailing_paragraph:
        width = max(1, console.console.size.width)
        target_ratio = (
            PLAIN_STREAM_TARGET_RATIO if self._use_plain_text else MARKDOWN_STREAM_TARGET_RATIO
        )
        target_rows = max(1, int(console.console.size.height * target_ratio) - 1)
        # Rough wrapped-row estimate: character count / terminal width.
        estimated_rows = math.ceil(len(trailing_paragraph.expandtabs()) / width)
        if estimated_rows > target_rows:
            trimmed = self._trim_to_displayable(text)
            if len(trimmed) < len(text):
                text = trimmed
                self._buffer = [trimmed]
                if self._has_reasoning:
                    self._styled_buffer = self._slice_styled_segments(trimmed)

    header = self._build_header()
    # Leave two rows of headroom below the rendered region.
    max_allowed_height = max(1, console.console.size.height - 2)
    self._max_render_height = min(self._max_render_height, max_allowed_height)

    if self._use_plain_text:
        content_height = self._estimate_plain_render_height(text)
        budget_height = min(content_height + PLAIN_STREAM_HEIGHT_FUDGE, max_allowed_height)

        # Height only grows (ratcheted) so the live area doesn't shrink/jitter.
        if budget_height > self._max_render_height:
            self._max_render_height = budget_height

        padding_lines = max(0, self._max_render_height - content_height)
        content = self._build_styled_text(text)
        if padding_lines:
            content.append("\n" * padding_lines)
    else:
        prepared = prepare_markdown_content(text, self._display._escape_xml)
        # Close dangling ``` fences so partial code blocks still render.
        prepared_for_display = self._close_incomplete_code_blocks(prepared)

        content_height = self._truncator.measure_rendered_height(
            prepared_for_display, console.console, self._display.code_style
        )
        budget_height = min(content_height + MARKDOWN_STREAM_HEIGHT_FUDGE, max_allowed_height)

        if budget_height > self._max_render_height:
            self._max_render_height = budget_height

        padding_lines = max(0, self._max_render_height - content_height)
        if padding_lines:
            prepared_for_display = prepared_for_display + ("\n" * padding_lines)

        content = Markdown(prepared_for_display, code_theme=self._display.code_style)

    header_with_spacing = header.copy()
    header_with_spacing.append("\n", style="default")

    combined = Group(header_with_spacing, content)
    try:
        self._live.update(combined)
        self._last_render_time = time.monotonic()
    except Exception as exc:
        # Rendering errors must not kill the stream; log and carry on.
        logger.warning(
            "Error updating live display during streaming",
            exc_info=True,
            data={"error": str(exc)},
        )
|
|
653
|
+
|
|
654
|
+
async def _render_worker(self) -> None:
    """Background task that drains the chunk queue and drives rendering.

    Blocks on the queue for the next item, then opportunistically drains
    everything already queued so bursts are rendered in one pass. Exits
    on the stop sentinel or task cancellation; always shuts down the live
    display resources on the way out.
    """
    assert self._queue is not None
    try:
        while True:
            try:
                item = await self._queue.get()
            except asyncio.CancelledError:
                break

            if item is self._stop_sentinel:
                break

            # Batch: pull every chunk already waiting without blocking.
            stop_requested = False
            chunks = [item]
            while True:
                try:
                    next_item = self._queue.get_nowait()
                except asyncio.QueueEmpty:
                    break
                if next_item is self._stop_sentinel:
                    # Remember the stop, but render the batch first.
                    stop_requested = True
                    break
                chunks.append(next_item)

            # A render is only needed if at least one chunk changed the buffer.
            should_render = False
            for chunk in chunks:
                if isinstance(chunk, StreamChunk):
                    should_render = self._handle_stream_chunk(chunk) or should_render
                elif isinstance(chunk, str):
                    should_render = self._handle_chunk(chunk) or should_render

            if should_render:
                self._render_current_buffer()
                # Throttle render frequency; sleep may be cancelled on shutdown.
                if self._min_render_interval:
                    try:
                        await asyncio.sleep(self._min_render_interval)
                    except asyncio.CancelledError:
                        break

            if stop_requested:
                break
    except asyncio.CancelledError:
        pass
    finally:
        # Always tear down the Live display, whatever the exit path.
        self._shutdown_live_resources()
|
|
699
|
+
|
|
700
|
+
def _shutdown_live_resources(self) -> None:
|
|
701
|
+
if self._live and self._live_started:
|
|
702
|
+
try:
|
|
703
|
+
self._live.__exit__(None, None, None)
|
|
704
|
+
except Exception:
|
|
705
|
+
pass
|
|
706
|
+
self._live = None
|
|
707
|
+
self._live_started = False
|
|
708
|
+
|
|
709
|
+
self._resume_progress_display()
|
|
710
|
+
self._active = False
|
|
711
|
+
|
|
712
|
+
def handle_tool_event(self, event_type: str, info: dict[str, Any] | None = None) -> None:
|
|
713
|
+
try:
|
|
714
|
+
if not self._active:
|
|
715
|
+
return
|
|
716
|
+
|
|
717
|
+
streams_arguments = info.get("streams_arguments", False) if info else False
|
|
718
|
+
tool_name = info.get("tool_name", "unknown") if info else "unknown"
|
|
719
|
+
|
|
720
|
+
if event_type == "start":
|
|
721
|
+
if streams_arguments:
|
|
722
|
+
self._switch_to_plain_text()
|
|
723
|
+
self.update(f"\n→ Calling {tool_name}\n")
|
|
724
|
+
else:
|
|
725
|
+
self._pause_progress_display()
|
|
726
|
+
self._switch_to_plain_text()
|
|
727
|
+
self.update(f"\n→ Calling {tool_name}\n")
|
|
728
|
+
return
|
|
729
|
+
if event_type == "delta":
|
|
730
|
+
if streams_arguments and info and "chunk" in info:
|
|
731
|
+
self.update(info["chunk"])
|
|
732
|
+
elif event_type == "text":
|
|
733
|
+
self._pause_progress_display()
|
|
734
|
+
elif event_type == "stop":
|
|
735
|
+
if streams_arguments:
|
|
736
|
+
self.update("\n")
|
|
737
|
+
self.close()
|
|
738
|
+
else:
|
|
739
|
+
self.update("\n")
|
|
740
|
+
self.close()
|
|
741
|
+
except Exception as exc:
|
|
742
|
+
logger.warning(
|
|
743
|
+
"Error handling tool event",
|
|
744
|
+
exc_info=True,
|
|
745
|
+
data={
|
|
746
|
+
"event_type": event_type,
|
|
747
|
+
"streams_arguments": info.get("streams_arguments") if info else None,
|
|
748
|
+
"error": str(exc),
|
|
749
|
+
},
|
|
750
|
+
)
|
|
751
|
+
|
|
752
|
+
|
|
753
|
+
# Public API of this module: the streaming handle implementations, the
# StreamingHandle protocol, and the stream layout/refresh tuning constants.
__all__ = [
    "NullStreamingHandle",
    "StreamingMessageHandle",
    "StreamingHandle",
    "MARKDOWN_STREAM_TARGET_RATIO",
    "MARKDOWN_STREAM_REFRESH_PER_SECOND",
    "MARKDOWN_STREAM_HEIGHT_FUDGE",
    "PLAIN_STREAM_TARGET_RATIO",
    "PLAIN_STREAM_REFRESH_PER_SECOND",
    "PLAIN_STREAM_HEIGHT_FUDGE",
]
|
|
764
|
+
|
|
765
|
+
|
|
766
|
+
class StreamingHandle(Protocol):
    """Structural interface for objects that receive streamed model output.

    Implemented (structurally) by the concrete handles exported from this
    module; consumers type against this protocol rather than a concrete class.
    """

    # Append a raw text chunk to the stream.
    def update(self, chunk: str) -> None: ...
    # Append a typed chunk carrying an explicit reasoning flag.
    def update_chunk(self, chunk: StreamChunk) -> None: ...

    # Finish the stream with the complete message (or its plain-text form).
    def finalize(self, message: "PromptMessageExtended | str") -> None: ...

    # Release display resources without finalizing content.
    def close(self) -> None: ...

    # Receive tool lifecycle events ("start"/"delta"/"text"/"stop").
    def handle_tool_event(self, event_type: str, info: dict[str, Any] | None = None) -> None: ...
|