fast-agent-mcp 0.3.15__py3-none-any.whl → 0.3.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fast-agent-mcp has been flagged as potentially problematic; details are available on the registry page.
- fast_agent/__init__.py +2 -0
- fast_agent/agents/agent_types.py +5 -0
- fast_agent/agents/llm_agent.py +7 -0
- fast_agent/agents/llm_decorator.py +6 -0
- fast_agent/agents/mcp_agent.py +134 -10
- fast_agent/cli/__main__.py +35 -0
- fast_agent/cli/commands/check_config.py +85 -0
- fast_agent/cli/commands/go.py +100 -36
- fast_agent/cli/constants.py +15 -1
- fast_agent/cli/main.py +2 -1
- fast_agent/config.py +39 -10
- fast_agent/constants.py +8 -0
- fast_agent/context.py +24 -15
- fast_agent/core/direct_decorators.py +9 -0
- fast_agent/core/fastagent.py +101 -1
- fast_agent/core/logging/listeners.py +8 -0
- fast_agent/interfaces.py +12 -0
- fast_agent/llm/fastagent_llm.py +45 -0
- fast_agent/llm/memory.py +26 -1
- fast_agent/llm/model_database.py +4 -1
- fast_agent/llm/model_factory.py +4 -2
- fast_agent/llm/model_info.py +19 -43
- fast_agent/llm/provider/anthropic/llm_anthropic.py +112 -0
- fast_agent/llm/provider/google/llm_google_native.py +238 -7
- fast_agent/llm/provider/openai/llm_openai.py +382 -19
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/resources/setup/agent.py +2 -0
- fast_agent/resources/setup/fastagent.config.yaml +6 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +208 -0
- fast_agent/tools/shell_runtime.py +404 -0
- fast_agent/ui/console_display.py +47 -996
- fast_agent/ui/elicitation_form.py +76 -24
- fast_agent/ui/elicitation_style.py +2 -2
- fast_agent/ui/enhanced_prompt.py +107 -37
- fast_agent/ui/history_display.py +20 -5
- fast_agent/ui/interactive_prompt.py +108 -3
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +103 -45
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/streaming.py +638 -0
- fast_agent/ui/tool_display.py +417 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/METADATA +8 -7
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/RECORD +47 -39
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/licenses/LICENSE +0 -0
fast_agent/ui/interactive_prompt.py

@@ -14,6 +14,7 @@ Usage:
 )
 """

+from pathlib import Path
 from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Union, cast

 if TYPE_CHECKING:

@@ -169,6 +170,9 @@ class InteractivePrompt:
                     # Handle tools list display
                     await self._list_tools(prompt_provider, agent)
                     continue
+                elif "list_skills" in command_dict:
+                    await self._list_skills(prompt_provider, agent)
+                    continue
                 elif "show_usage" in command_dict:
                     # Handle usage display
                     await self._show_usage(prompt_provider, agent)

@@ -189,6 +193,41 @@ class InteractivePrompt:
                     usage = getattr(agent_obj, "usage_accumulator", None)
                     display_history_overview(target_agent, history, usage)
                     continue
+                elif "clear_last" in command_dict:
+                    clear_info = command_dict.get("clear_last")
+                    clear_agent = (
+                        clear_info.get("agent") if isinstance(clear_info, dict) else None
+                    )
+                    target_agent = clear_agent or agent
+                    try:
+                        agent_obj = prompt_provider._agent(target_agent)
+                    except Exception:
+                        rich_print(f"[red]Unable to load agent '{target_agent}'[/red]")
+                        continue
+
+                    removed_message = None
+                    pop_callable = getattr(agent_obj, "pop_last_message", None)
+                    if callable(pop_callable):
+                        removed_message = pop_callable()
+                    else:
+                        history = getattr(agent_obj, "message_history", [])
+                        if history:
+                            try:
+                                removed_message = history.pop()
+                            except Exception:
+                                removed_message = None
+
+                    if removed_message:
+                        role = getattr(removed_message, "role", "message")
+                        role_display = role.capitalize() if isinstance(role, str) else "Message"
+                        rich_print(
+                            f"[green]Removed last {role_display} for agent '{target_agent}'.[/green]"
+                        )
+                    else:
+                        rich_print(
+                            f"[yellow]No messages to remove for agent '{target_agent}'.[/yellow]"
+                        )
+                    continue
                 elif "clear_history" in command_dict:
                     clear_info = command_dict.get("clear_history")
                     clear_agent = (

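
The `clear_last` branch leans on duck typing: it prefers an agent-provided `pop_last_message()` and only falls back to popping `message_history` directly. A minimal sketch of that fallback, using a hypothetical agent class rather than fast-agent's own types:

```python
# Sketch only; HypotheticalAgent stands in for whatever prompt_provider._agent() returns.
from dataclasses import dataclass, field
from typing import Any, List, Optional


@dataclass
class HypotheticalAgent:
    message_history: List[Any] = field(default_factory=list)

    def pop_last_message(self) -> Optional[Any]:
        # Agents that manage their own history expose this richer API.
        return self.message_history.pop() if self.message_history else None


def remove_last_message(agent_obj: Any) -> Optional[Any]:
    pop_callable = getattr(agent_obj, "pop_last_message", None)
    if callable(pop_callable):
        return pop_callable()
    # Fallback for agents without pop_last_message(): mutate the raw history list.
    history = getattr(agent_obj, "message_history", [])
    return history.pop() if history else None


agent = HypotheticalAgent(message_history=["hello", "hi there"])
assert remove_last_message(agent) == "hi there"
```
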
@@ -857,19 +896,21 @@ class InteractivePrompt:
             rich_print()

             # Display tools using clean compact format
-
+            index = 1
+            for tool in tools_result.tools:
                 # Main line: [ 1] tool_name Title
                 from rich.text import Text

+                meta = getattr(tool, "meta", {}) or {}
+
                 tool_line = Text()
-                tool_line.append(f"[{
+                tool_line.append(f"[{index:2}] ", style="dim cyan")
                 tool_line.append(tool.name, style="bright_blue bold")

                 # Add title if available
                 if tool.title and tool.title.strip():
                     tool_line.append(f" {tool.title}", style="default")

-                meta = getattr(tool, "meta", {}) or {}
                 if meta.get("openai/skybridgeEnabled"):
                     tool_line.append(" (skybridge)", style="cyan")

@@ -932,13 +973,77 @@ class InteractivePrompt:
                 rich_print(f" [dim magenta]template:[/dim magenta] {template}")

                 rich_print()  # Space between tools
+                index += 1

+            if index == 1:
+                rich_print("[yellow]No MCP tools available for this agent[/yellow]")
         except Exception as e:
             import traceback

             rich_print(f"[red]Error listing tools: {e}[/red]")
             rich_print(f"[dim]{traceback.format_exc()}[/dim]")

+    async def _list_skills(self, prompt_provider: "AgentApp", agent_name: str) -> None:
+        """List available local skills for an agent."""
+
+        try:
+            assert hasattr(prompt_provider, "_agent"), (
+                "Interactive prompt expects an AgentApp with _agent()"
+            )
+            agent = prompt_provider._agent(agent_name)
+
+            rich_print(f"\n[bold]Skills for agent [cyan]{agent_name}[/cyan]:[/bold]")
+
+            skill_manifests = getattr(agent, "_skill_manifests", None)
+            manifests = list(skill_manifests) if skill_manifests else []
+
+            if not manifests:
+                rich_print("[yellow]No skills available for this agent[/yellow]")
+                return
+
+            rich_print()
+
+            for index, manifest in enumerate(manifests, 1):
+                from rich.text import Text
+
+                name = getattr(manifest, "name", "")
+                description = getattr(manifest, "description", "")
+                path = Path(getattr(manifest, "path", Path()))
+
+                tool_line = Text()
+                tool_line.append(f"[{index:2}] ", style="dim cyan")
+                tool_line.append(name, style="bright_blue bold")
+                rich_print(tool_line)
+
+                if description:
+                    import textwrap
+
+                    wrapped_lines = textwrap.wrap(
+                        description.strip(), width=72, subsequent_indent=" "
+                    )
+                    for line in wrapped_lines:
+                        if line.startswith(" "):
+                            rich_print(f" [white]{line[5:]}[/white]")
+                        else:
+                            rich_print(f" [white]{line}[/white]")
+
+                source_path = path if path else Path(".")
+                if source_path.is_file():
+                    source_path = source_path.parent
+                try:
+                    display_path = source_path.relative_to(Path.cwd())
+                except ValueError:
+                    display_path = source_path
+
+                rich_print(f" [dim green]source:[/dim green] {display_path}")
+                rich_print()
+
+        except Exception as exc:  # noqa: BLE001
+            import traceback
+
+            rich_print(f"[red]Error listing skills: {exc}[/red]")
+            rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+
     async def _show_usage(self, prompt_provider: "AgentApp", agent_name: str) -> None:
         """
         Show usage statistics for the current agent(s) in a colorful table format.

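
`_list_skills` only touches duck-typed manifest attributes (`name`, `description`, `path`) via `getattr`, so any object with that shape renders; the real manifests presumably come from the new `fast_agent/skills/registry.py` listed above. A sketch of the rendering loop against a hypothetical manifest type:

```python
# Illustration only: FakeSkillManifest mimics the attributes _list_skills reads via getattr.
from dataclasses import dataclass
from pathlib import Path

from rich import print as rich_print
from rich.text import Text


@dataclass
class FakeSkillManifest:
    name: str
    description: str
    path: Path


manifests = [
    FakeSkillManifest(
        name="web-search",
        description="Search the web and summarise the results for the agent.",
        path=Path("skills/web_search/SKILL.md"),  # hypothetical layout
    )
]

for index, manifest in enumerate(manifests, 1):
    line = Text()
    line.append(f"[{index:2}] ", style="dim cyan")
    line.append(manifest.name, style="bright_blue bold")
    rich_print(line)
    rich_print(f"     [white]{manifest.description}[/white]")
    # Show the directory containing the skill definition, as the real method does.
    rich_print(f"     [dim green]source:[/dim green] {manifest.path.parent}")
```
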
fast_agent/ui/markdown_helpers.py (new file)

@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+from typing import Any, Iterable, Iterator
+
+HTML_ESCAPE_CHARS: dict[str, str] = {
+    "&": "&amp;",
+    "<": "&lt;",
+    ">": "&gt;",
+    '"': "&quot;",
+    "'": "&#x27;",
+}
+
+
+def _flatten_tokens(tokens: Iterable[Any]) -> Iterator[Any]:
+    """Recursively flatten markdown-it token trees."""
+    for token in tokens:
+        yield token
+        if token.children:
+            yield from _flatten_tokens(token.children)
+
+
+def prepare_markdown_content(content: str, escape_xml: bool = True) -> str:
+    """Prepare content for markdown rendering, escaping HTML/XML outside code blocks."""
+    if not escape_xml or not isinstance(content, str):
+        return content
+
+    from markdown_it import MarkdownIt
+
+    parser = MarkdownIt()
+    try:
+        tokens = parser.parse(content)
+    except Exception:
+        result = content
+        for char, replacement in HTML_ESCAPE_CHARS.items():
+            result = result.replace(char, replacement)
+        return result
+
+    protected_ranges: list[tuple[int, int]] = []
+    lines = content.split("\n")
+
+    for token in _flatten_tokens(tokens):
+        if token.map is not None:
+            if token.type in ("fence", "code_block"):
+                start_line = token.map[0]
+                end_line = token.map[1]
+                start_pos = sum(len(line) + 1 for line in lines[:start_line])
+                end_pos = sum(len(line) + 1 for line in lines[:end_line])
+                protected_ranges.append((start_pos, end_pos))
+
+        if token.type == "code_inline":
+            code_content = token.content
+            if code_content:
+                pattern = f"`{code_content}`"
+                start = 0
+                while True:
+                    pos = content.find(pattern, start)
+                    if pos == -1:
+                        break
+                    in_protected = any(s <= pos < e for s, e in protected_ranges)
+                    if not in_protected:
+                        protected_ranges.append((pos, pos + len(pattern)))
+                    start = pos + len(pattern)
+
+    import re
+
+    fence_pattern = r"^```"
+    fences = list(re.finditer(fence_pattern, content, re.MULTILINE))
+
+    if len(fences) % 2 == 1:
+        last_fence_pos = fences[-1].start()
+        in_protected = any(s <= last_fence_pos < e for s, e in protected_ranges)
+        if not in_protected:
+            protected_ranges.append((last_fence_pos, len(content)))
+
+    protected_ranges.sort(key=lambda x: x[0])
+
+    merged_ranges: list[tuple[int, int]] = []
+    for start, end in protected_ranges:
+        if merged_ranges and start <= merged_ranges[-1][1]:
+            merged_ranges[-1] = (merged_ranges[-1][0], max(end, merged_ranges[-1][1]))
+        else:
+            merged_ranges.append((start, end))
+
+    result_segments: list[str] = []
+    last_end = 0
+
+    for start, end in merged_ranges:
+        unprotected_text = content[last_end:start]
+        for char, replacement in HTML_ESCAPE_CHARS.items():
+            unprotected_text = unprotected_text.replace(char, replacement)
+        result_segments.append(unprotected_text)
+
+        result_segments.append(content[start:end])
+        last_end = end
+
+    remainder_text = content[last_end:]
+    for char, replacement in HTML_ESCAPE_CHARS.items():
+        remainder_text = remainder_text.replace(char, replacement)
+    result_segments.append(remainder_text)
+
+    return "".join(result_segments)
+
+
+__all__ = ["HTML_ESCAPE_CHARS", "prepare_markdown_content"]

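
As a quick illustration of what the new helper is for: angle brackets in ordinary prose are entity-escaped so the Rich Markdown renderer does not treat them as HTML, while fenced blocks and inline code spans pass through untouched. A small usage sketch, assuming the module is importable as `fast_agent.ui.markdown_helpers` (the path listed above):

```python
from fast_agent.ui.markdown_helpers import prepare_markdown_content

FENCE = "`" * 3  # avoid literal triple backticks inside this snippet

sample = (
    "Call `<tool>` like this:\n\n"
    f"{FENCE}xml\n"
    '<tool name="search"/>\n'
    f"{FENCE}\n\n"
    "Outside code, <tags> would otherwise be eaten by the renderer.\n"
)

escaped = prepare_markdown_content(sample)
# The fenced block and the inline `<tool>` span are protected ranges and pass through;
# the bare <tags> in prose is entity-escaped.
assert '<tool name="search"/>' in escaped
assert "&lt;tags&gt;" in escaped
```
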
fast_agent/ui/markdown_truncator.py

@@ -57,6 +57,7 @@ class CodeBlockInfo:
     end_pos: int
     fence_line: int
     language: str
+    fence_text: str | None
     token: Token


@@ -89,6 +90,13 @@ class MarkdownTruncator:
         self._last_full_text: str | None = None
         self._last_truncated_text: str | None = None
         self._last_terminal_height: int | None = None
+        # Markdown parse cache
+        self._cache_source: str | None = None
+        self._cache_tokens: List[Token] | None = None
+        self._cache_lines: List[str] | None = None
+        self._cache_safe_points: List[TruncationPoint] | None = None
+        self._cache_code_blocks: List[CodeBlockInfo] | None = None
+        self._cache_tables: List[TableInfo] | None = None

     def truncate(
         self,

@@ -285,6 +293,18 @@ class MarkdownTruncator:

         return truncated_text

+    def _ensure_parse_cache(self, text: str) -> None:
+        if self._cache_source == text:
+            return
+
+        tokens = self.parser.parse(text)
+        self._cache_source = text
+        self._cache_tokens = tokens
+        self._cache_lines = text.split("\n")
+        self._cache_safe_points = None
+        self._cache_code_blocks = None
+        self._cache_tables = None
+
     def _find_safe_truncation_points(self, text: str) -> List[TruncationPoint]:
         """Find safe positions to truncate at (block boundaries).

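
The point of `_ensure_parse_cache` is that streaming re-truncates the same snapshot repeatedly, and safe points, code blocks, and tables all need the same markdown-it token list; parsing once per distinct source text and memoising the result avoids redundant work. The idea in isolation, as a minimal sketch independent of the truncator:

```python
from typing import Any, List, Optional

from markdown_it import MarkdownIt


class CachedParser:
    """Parse each distinct source text once and reuse the token list."""

    def __init__(self) -> None:
        self._parser = MarkdownIt()
        self._source: Optional[str] = None
        self._tokens: Optional[List[Any]] = None

    def tokens(self, text: str) -> List[Any]:
        if text != self._source:
            # Cache miss: the text changed since the last parse.
            self._tokens = self._parser.parse(text)
            self._source = text
        assert self._tokens is not None
        return self._tokens


parser = CachedParser()
doc = "# Title\n\nSome prose with a [link](https://example.com).\n"
assert parser.tokens(doc) is parser.tokens(doc)  # second call reuses the cached parse
```
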
@@ -294,11 +314,16 @@ class MarkdownTruncator:
         Returns:
             List of TruncationPoint objects representing safe truncation positions.
         """
-
-
+        self._ensure_parse_cache(text)
+        if self._cache_safe_points is not None:
+            return list(self._cache_safe_points)

-
-
+        assert self._cache_tokens is not None
+        assert self._cache_lines is not None
+
+        safe_points: List[TruncationPoint] = []
+        tokens = self._cache_tokens
+        lines = self._cache_lines

         for token in tokens:
             # We're interested in block-level tokens with map information

@@ -319,13 +344,13 @@ class MarkdownTruncator:
                         is_closing=(token.nesting == 0),  # Self-closing or block end
                     )
                 )
-
-        return safe_points
+        self._cache_safe_points = safe_points
+        return list(safe_points)

     def _get_code_block_info(self, text: str) -> List[CodeBlockInfo]:
         """Extract code block positions and metadata using markdown-it.

-        Uses same technique as
+        Uses same technique as prepare_markdown_content in markdown_helpers.py:
         parse once with markdown-it, extract exact positions from tokens.

         Args:

@@ -334,9 +359,16 @@ class MarkdownTruncator:
         Returns:
             List of CodeBlockInfo objects with position and language metadata.
         """
-
-
-
+        self._ensure_parse_cache(text)
+        if self._cache_code_blocks is not None:
+            return list(self._cache_code_blocks)
+
+        assert self._cache_tokens is not None
+        assert self._cache_lines is not None
+
+        tokens = self._cache_tokens
+        lines = self._cache_lines
+        code_blocks: List[CodeBlockInfo] = []

         for token in self._flatten_tokens(tokens):
             if token.type in ("fence", "code_block") and token.map:

@@ -345,6 +377,9 @@ class MarkdownTruncator:
                 start_pos = sum(len(line) + 1 for line in lines[:start_line])
                 end_pos = sum(len(line) + 1 for line in lines[:end_line])
                 language = token.info or "" if hasattr(token, "info") else ""
+                fence_text: str | None = None
+                if token.type == "fence":
+                    fence_text = lines[start_line] if 0 <= start_line < len(lines) else None

                 code_blocks.append(
                     CodeBlockInfo(

@@ -352,11 +387,35 @@ class MarkdownTruncator:
                         end_pos=end_pos,
                         fence_line=start_line,
                         language=language,
+                        fence_text=fence_text,
                         token=token,
                     )
                 )
+        self._cache_code_blocks = code_blocks
+        return list(code_blocks)
+
+    def _build_code_block_prefix(self, block: CodeBlockInfo) -> str | None:
+        """Construct the opening fence text for a code block if applicable."""
+        token = block.token
+
+        if token.type == "fence":
+            if block.fence_text:
+                fence_line = block.fence_text
+            else:
+                markup = getattr(token, "markup", "") or "```"
+                info = (getattr(token, "info", "") or "").strip()
+                fence_line = f"{markup}{info}" if info else markup
+            return fence_line if fence_line.endswith("\n") else fence_line + "\n"
+
+        if token.type == "code_block":
+            info = (getattr(token, "info", "") or "").strip()
+            if info:
+                return f"```{info}\n"
+            if block.language:
+                return f"```{block.language}\n"
+            return "```\n"

-        return
+        return None

     def _get_table_info(self, text: str) -> List[TableInfo]:
         """Extract table positions and metadata using markdown-it.

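
`_build_code_block_prefix` exists because a truncated fragment can begin inside a code block whose opening fence has already scrolled away; the prefix is rebuilt either from the cached fence line or from the token's `markup` and `info` fields. The same recovery works directly against markdown-it tokens, shown here as a standalone sketch:

```python
from markdown_it import MarkdownIt

# A tilde fence with an info string; markdown-it records both pieces on the token.
text = "~~~~python\nprint('hello')\n~~~~\n"
fence_token = next(t for t in MarkdownIt().parse(text) if t.type == "fence")

markup = fence_token.markup or "```"     # the literal fence characters, e.g. "~~~~"
info = (fence_token.info or "").strip()  # the language / info string, e.g. "python"
prefix = f"{markup}{info}\n" if info else f"{markup}\n"

assert prefix == "~~~~python\n"  # prepending this keeps a truncated fragment rendering as code
```
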
@@ -370,9 +429,16 @@ class MarkdownTruncator:
         Returns:
             List of TableInfo objects with position and header metadata.
         """
-
-
-
+        self._ensure_parse_cache(text)
+        if self._cache_tables is not None:
+            return list(self._cache_tables)
+
+        assert self._cache_tokens is not None
+        assert self._cache_lines is not None
+
+        tokens = self._cache_tokens
+        lines = self._cache_lines
+        tables: List[TableInfo] = []

         for i, token in enumerate(tokens):
             if token.type == "table_open" and token.map:

@@ -435,8 +501,8 @@ class MarkdownTruncator:
                         header_lines=header_lines,
                     )
                 )
-
-        return tables
+        self._cache_tables = tables
+        return list(tables)

     def _find_best_truncation_point(
         self,

@@ -571,8 +637,8 @@ class MarkdownTruncator:
             # If truncation happened after the fence line, it scrolled off
             if truncation_point.char_position > code_block.start_pos:
                 # Check if fence is already at the beginning (avoid duplicates)
-                fence =
-                if not truncated_text.startswith(fence):
+                fence = self._build_code_block_prefix(code_block)
+                if fence and not truncated_text.startswith(fence):
                     # Fence scrolled off - prepend it
                     return fence + truncated_text

@@ -611,10 +677,8 @@ class MarkdownTruncator:
                 # Truncated within this code block
                 # Simple check: did truncation remove the fence?
                 if truncation_pos > block.start_pos:
-
-                    fence
-                    if not truncated_text.startswith(fence):
-                        # Fence scrolled off - prepend it
+                    fence = self._build_code_block_prefix(block)
+                    if fence and not truncated_text.startswith(fence):
                         return fence + truncated_text
                 # Fence still on screen or already prepended
                 return truncated_text

@@ -875,32 +939,26 @@ class MarkdownTruncator:
         if not truncated_text or truncated_text == original_text:
             return truncated_text

+        original_fragment = truncated_text
+
         # Find where the truncated text starts in the original
-        truncation_pos = original_text.
+        truncation_pos = original_text.rfind(original_fragment)
         if truncation_pos == -1:
-
-
+            truncation_pos = max(0, len(original_text) - len(original_fragment))
+
+        code_blocks = self._get_code_block_info(original_text)
+        active_block = None
+        for block in code_blocks:
+            if block.start_pos <= truncation_pos < block.end_pos:
+                active_block = block
+
+        if active_block:
+            fence = self._build_code_block_prefix(active_block)
+            if fence and not truncated_text.startswith(fence):
+                truncated_text = fence + truncated_text

-        # Check for incomplete code
-
-
-        # If we removed an odd number of fences, we're inside a code block
-        if original_fence_count % 2 == 1:
-            # Find the last opening fence before truncation point
-            import re
-            before_truncation = original_text[:truncation_pos]
-            fences = list(re.finditer(r'^```(\w*)', before_truncation, re.MULTILINE))
-            if fences:
-                last_fence = fences[-1]
-                language = last_fence.group(1) if last_fence.group(1) else ''
-                fence = f'```{language}\n'
-                if not truncated_text.startswith(fence):
-                    truncated_text = fence + truncated_text
-
-        # Check for incomplete tables
-        # Only if we're not inside a code block
-        if original_fence_count % 2 == 0 and '|' in truncated_text:
-            # Use the existing table header restoration logic
+        # Check for incomplete tables when not inside a code block
+        if active_block is None and '|' in truncated_text:
             tables = self._get_table_info(original_text)
             for table in tables:
                 if table.thead_end_pos <= truncation_pos < table.tbody_end_pos:

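
The rewritten tail-repair logic replaces the old fence-counting heuristic: it locates the visible fragment in the original text, checks the (cached) code-block info to see whether that position falls inside a block, and if so re-prepends the opening fence so the fragment keeps rendering as code. The effect, illustrated on plain strings:

```python
# Illustration only: what the visible tail looks like once the opening fence scrolls off.
full = "```python\nfor i in range(3):\n    print(i)\n```\nDone.\n"
visible = "".join(full.splitlines(keepends=True)[2:])  # pretend the first two lines scrolled away

# Without repair the tail starts mid code block and would render as plain prose.
if not visible.startswith("```"):
    visible = "```python\n" + visible  # restore the opening fence, language included

assert visible.startswith("```python\n")
```
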
fast_agent/ui/message_primitives.py (new file)

@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from enum import Enum
+
+
+class MessageType(Enum):
+    """Types of messages that can be displayed."""
+
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL_CALL = "tool_call"
+    TOOL_RESULT = "tool_result"
+
+
+MESSAGE_CONFIGS: dict[MessageType, dict[str, str]] = {
+    MessageType.USER: {
+        "block_color": "blue",
+        "arrow": "▶",
+        "arrow_style": "dim blue",
+        "highlight_color": "blue",
+    },
+    MessageType.ASSISTANT: {
+        "block_color": "green",
+        "arrow": "◀",
+        "arrow_style": "dim green",
+        "highlight_color": "bright_green",
+    },
+    MessageType.SYSTEM: {
+        "block_color": "yellow",
+        "arrow": "●",
+        "arrow_style": "dim yellow",
+        "highlight_color": "bright_yellow",
+    },
+    MessageType.TOOL_CALL: {
+        "block_color": "magenta",
+        "arrow": "◀",
+        "arrow_style": "dim magenta",
+        "highlight_color": "magenta",
+    },
+    MessageType.TOOL_RESULT: {
+        "block_color": "magenta",
+        "arrow": "▶",
+        "arrow_style": "dim magenta",
+        "highlight_color": "magenta",
+    },
+}
+
+
+__all__ = ["MessageType", "MESSAGE_CONFIGS"]
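
MESSAGE_CONFIGS centralises the per-role colours and arrows that the new streaming and tool displays share. A small sketch of how a display layer can consume it; the Rich calls around the lookup are illustrative, not taken from console_display.py:

```python
from rich import print as rich_print
from rich.text import Text

from fast_agent.ui.message_primitives import MESSAGE_CONFIGS, MessageType

config = MESSAGE_CONFIGS[MessageType.ASSISTANT]

header = Text()
header.append(f"{config['arrow']} ", style=config["arrow_style"])  # dim green "◀"
header.append("assistant", style=config["block_color"])            # role name in green
rich_print(header)
```
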