fast-agent-mcp 0.2.46__py3-none-any.whl → 0.2.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of fast-agent-mcp has been flagged as potentially problematic.

@@ -10,6 +10,7 @@ from mcp_agent.agents.workflow.evaluator_optimizer import (
      EvaluatorOptimizerAgent,
      QualityRating,
  )
+ from mcp_agent.agents.workflow.iterative_planner import IterativePlanner
  from mcp_agent.agents.workflow.orchestrator_agent import OrchestratorAgent
  from mcp_agent.agents.workflow.parallel_agent import ParallelAgent
  from mcp_agent.agents.workflow.router_agent import RouterAgent
@@ -21,9 +22,10 @@ from mcp_agent.event_progress import ProgressAction
  from mcp_agent.llm.augmented_llm import RequestParams
  from mcp_agent.llm.model_factory import ModelFactory
  from mcp_agent.logging.logger import get_logger
+ from mcp_agent.mcp.interfaces import AgentProtocol

  # Type aliases for improved readability and IDE support
- AgentDict = Dict[str, Agent]
+ AgentDict = Dict[str, AgentProtocol]
  AgentConfigDict = Dict[str, Dict[str, Any]]
  T = TypeVar("T") # For generic types

@@ -153,7 +155,7 @@ async def create_agents_by_type(
  await agent.attach_llm(
      llm_factory,
      request_params=config.default_request_params,
-     api_key=config.api_key
+     api_key=config.api_key,
  )
  result_agents[name] = agent

@@ -172,11 +174,11 @@ async def create_agents_by_type(
      await agent.attach_llm(
          llm_factory,
          request_params=config.default_request_params,
-         api_key=config.api_key
+         api_key=config.api_key,
      )
      result_agents[name] = agent

- elif agent_type == AgentType.ORCHESTRATOR:
+ elif agent_type == AgentType.ORCHESTRATOR or agent_type == AgentType.ITERATIVE_PLANNER:
      # Get base params configured with model settings
      base_params = (
          config.default_request_params.model_copy()
@@ -193,24 +195,35 @@ async def create_agents_by_type(
      agent = active_agents[agent_name]
      child_agents.append(agent)

- # Create the orchestrator
- orchestrator = OrchestratorAgent(
-     config=config,
-     context=app_instance.context,
-     agents=child_agents,
-     plan_iterations=agent_data.get("plan_iterations", 5),
-     plan_type=agent_data.get("plan_type", "full"),
- )
+ if AgentType.ORCHESTRATOR == agent_type:
+     # Create the orchestrator
+     orchestrator = OrchestratorAgent(
+         config=config,
+         context=app_instance.context,
+         agents=child_agents,
+         plan_iterations=agent_data.get("plan_iterations", 5),
+         plan_type=agent_data.get("plan_type", "full"),
+     )
+ else:
+     orchestrator = IterativePlanner(
+         config=config,
+         context=app_instance.context,
+         agents=child_agents,
+         plan_iterations=agent_data.get("plan_iterations", 5),
+         plan_type=agent_data.get("plan_type", "full"),
+     )

  # Initialize the orchestrator
  await orchestrator.initialize()

  # Attach LLM to the orchestrator
  llm_factory = model_factory_func(model=config.model)
+
+ # print("************", config.default_request_params.instruction)
  await orchestrator.attach_llm(
      llm_factory,
      request_params=config.default_request_params,
-     api_key=config.api_key
+     api_key=config.api_key,
  )

  result_agents[name] = orchestrator
@@ -274,7 +287,7 @@ async def create_agents_by_type(
  await router.attach_llm(
      llm_factory,
      request_params=config.default_request_params,
-     api_key=config.api_key
+     api_key=config.api_key,
  )
  result_agents[name] = router

@@ -461,7 +474,6 @@ async def create_agents_in_dependency_order(
      )
      active_agents.update(evaluator_agents)

- # Create orchestrator agents last since they might depend on other agents
  if AgentType.ORCHESTRATOR.value in [agents_dict[name]["type"] for name in group]:
      orchestrator_agents = await create_agents_by_type(
          app_instance,
@@ -476,6 +488,21 @@ async def create_agents_in_dependency_order(
      )
      active_agents.update(orchestrator_agents)

+ # Create orchestrator2 agents last since they might depend on other agents
+ if AgentType.ITERATIVE_PLANNER.value in [agents_dict[name]["type"] for name in group]:
+     orchestrator2_agents = await create_agents_by_type(
+         app_instance,
+         {
+             name: agents_dict[name]
+             for name in group
+             if agents_dict[name]["type"] == AgentType.ITERATIVE_PLANNER.value
+         },
+         AgentType.ITERATIVE_PLANNER,
+         active_agents,
+         model_factory_func,
+     )
+     active_agents.update(orchestrator2_agents)
+
  return active_agents


@@ -31,6 +31,9 @@ from mcp_agent.core.direct_decorators import (
  from mcp_agent.core.direct_decorators import (
      evaluator_optimizer as evaluator_optimizer_decorator,
  )
+ from mcp_agent.core.direct_decorators import (
+     iterative_planner as orchestrator2_decorator,
+ )
  from mcp_agent.core.direct_decorators import (
      orchestrator as orchestrator_decorator,
  )
@@ -249,6 +252,7 @@ class FastAgent:
  agent = agent_decorator
  custom = custom_decorator
  orchestrator = orchestrator_decorator
+ iterative_planner = orchestrator2_decorator
  router = router_decorator
  chain = chain_decorator
  parallel = parallel_decorator
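
The hunks above register the new planner as a FastAgent decorator (`iterative_planner = orchestrator2_decorator`), so it is used the same way as `@fast.orchestrator`. A minimal sketch, modelled on the example updated later in this diff; the `FastAgent` import path and the `agent.orchestrate.send(...)` call are assumptions, not shown in this diff:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent  # import path assumed

fast = FastAgent("Planner Demo")


@fast.agent(name="finder", instruction="Find the relevant information.")
@fast.agent(name="writer", instruction="Write up the findings.")
@fast.iterative_planner(
    name="orchestrate",
    agents=["finder", "writer"],
    plan_iterations=5,  # same keyword the updated example passes
)
async def main() -> None:
    async with fast.run() as agent:
        # Hypothetical call; the example in this diff stops at fast.run().
        await agent.orchestrate.send("Summarise the latest release notes.")


if __name__ == "__main__":
    asyncio.run(main())
```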
@@ -0,0 +1,170 @@
+ """Utilities for detecting and processing Mermaid diagrams in text content."""
+
+ import base64
+ import re
+ import zlib
+ from dataclasses import dataclass
+ from typing import List, Optional
+
+ # Mermaid chart viewer URL prefix
+ MERMAID_VIEWER_URL = "https://www.mermaidchart.com/play#"
+ # mermaid.live#pako= also works but the playground has better ux
+
+
+ @dataclass
+ class MermaidDiagram:
+     """Represents a detected Mermaid diagram."""
+
+     content: str
+     title: Optional[str] = None
+     start_pos: int = 0
+     end_pos: int = 0
+
+
+ def extract_mermaid_diagrams(text: str) -> List[MermaidDiagram]:
+     """
+     Extract all Mermaid diagram blocks from text content.
+
+     Handles both simple mermaid blocks and blocks with titles:
+     - ```mermaid
+     - ```mermaid title={Some Title}
+
+     Also extracts titles from within the diagram content.
+
+     Args:
+         text: The text content to search for Mermaid diagrams
+
+     Returns:
+         List of MermaidDiagram objects found in the text
+     """
+     diagrams = []
+
+     # Pattern to match mermaid code blocks with optional title
+     # Matches: ```mermaid or ```mermaid title={...}
+     pattern = r"```mermaid(?:\s+title=\{([^}]+)\})?\s*\n(.*?)```"
+
+     for match in re.finditer(pattern, text, re.DOTALL):
+         title = match.group(1) # May be None if no title
+         content = match.group(2).strip()
+
+         if content: # Only add if there's actual diagram content
+             # If no title from code fence, look for title in the content
+             if not title:
+                 # Look for various title patterns in mermaid diagrams
+                 # pie title, graph title, etc.
+                 title_patterns = [
+                     r"^\s*title\s+(.+?)(?:\n|$)", # Generic title
+                     r"^\s*pie\s+title\s+(.+?)(?:\n|$)", # Pie chart title
+                     r"^\s*gantt\s+title\s+(.+?)(?:\n|$)", # Gantt chart title
+                 ]
+
+                 for title_pattern in title_patterns:
+                     title_match = re.search(title_pattern, content, re.MULTILINE)
+                     if title_match:
+                         title = title_match.group(1).strip()
+                         break
+
+             diagrams.append(
+                 MermaidDiagram(
+                     content=content, title=title, start_pos=match.start(), end_pos=match.end()
+                 )
+             )
+
+     return diagrams
+
+
+ def create_mermaid_live_link(diagram_content: str) -> str:
+     """
+     Create a Mermaid Live Editor link from diagram content.
+
+     The link uses pako compression (zlib) and base64 encoding.
+
+     Args:
+         diagram_content: The Mermaid diagram source code
+
+     Returns:
+         Complete URL to Mermaid Live Editor
+     """
+     # Create the JSON structure expected by Mermaid Live
+     # Escape newlines and quotes in the diagram content
+     escaped_content = diagram_content.replace('"', '\\"').replace("\n", "\\n")
+     json_str = f'{{"code":"{escaped_content}","mermaid":{{"theme":"default"}},"updateEditor":false,"autoSync":true,"updateDiagram":false}}'
+
+     # Compress using zlib (pako compatible)
+     compressed = zlib.compress(json_str.encode("utf-8"))
+
+     # Base64 encode
+     encoded = base64.urlsafe_b64encode(compressed).decode("utf-8")
+
+     # Remove padding characters as Mermaid Live doesn't use them
+     encoded = encoded.rstrip("=")
+
+     return f"{MERMAID_VIEWER_URL}pako:{encoded}"
+
+
+ def format_mermaid_links(diagrams: List[MermaidDiagram]) -> List[str]:
+     """
+     Format Mermaid diagrams as markdown links.
+
+     Args:
+         diagrams: List of MermaidDiagram objects
+
+     Returns:
+         List of formatted markdown strings
+     """
+     links = []
+
+     for i, diagram in enumerate(diagrams, 1):
+         link = create_mermaid_live_link(diagram.content)
+
+         if diagram.title:
+             # Use the title from the diagram with number
+             markdown = f"Diagram {i} - {diagram.title}: [Open Diagram]({link})"
+         else:
+             # Use generic numbering
+             markdown = f"Diagram {i}: [Open Diagram]({link})"
+
+         links.append(markdown)
+
+     return links
+
+
+ def detect_diagram_type(content: str) -> str:
+     """
+     Detect the type of mermaid diagram from content.
+
+     Args:
+         content: The mermaid diagram source code
+
+     Returns:
+         Human-readable diagram type name
+     """
+     content_lower = content.strip().lower()
+
+     # Check for common diagram types
+     if content_lower.startswith(("graph ", "flowchart ")):
+         return "Flowchart"
+     elif content_lower.startswith("sequencediagram"):
+         return "Sequence"
+     elif content_lower.startswith("pie"):
+         return "Pie Chart"
+     elif content_lower.startswith("gantt"):
+         return "Gantt Chart"
+     elif content_lower.startswith("classdiagram"):
+         return "Class Diagram"
+     elif content_lower.startswith("statediagram"):
+         return "State Diagram"
+     elif content_lower.startswith("erdiagram"):
+         return "ER Diagram"
+     elif content_lower.startswith("journey"):
+         return "User Journey"
+     elif content_lower.startswith("gitgraph"):
+         return "Git Graph"
+     elif content_lower.startswith("c4context"):
+         return "C4 Context"
+     elif content_lower.startswith("mindmap"):
+         return "Mind Map"
+     elif content_lower.startswith("timeline"):
+         return "Timeline"
+     else:
+         return "Diagram"
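
Taken together, the new module detects fenced Mermaid blocks, labels them, and turns them into shareable playground links. A small usage sketch (the import path matches the `mcp_agent.core.mermaid_utils` import added to the console display later in this diff; the sample message is illustrative):

```python
from mcp_agent.core.mermaid_utils import (
    create_mermaid_live_link,
    detect_diagram_type,
    extract_mermaid_diagrams,
)

# Build a message containing a fenced Mermaid block; the fence is kept in a
# variable only so this snippet embeds cleanly in other documents.
fence = "```"
text = (
    "Here is the flow:\n"
    f"{fence}mermaid title={{Login Flow}}\n"
    "graph TD\n"
    "    A[Start] --> B[Login]\n"
    f"{fence}\n"
)

for diagram in extract_mermaid_diagrams(text):
    # Title comes from the fence attribute, or from a `title` line inside the diagram.
    print(diagram.title, "->", detect_diagram_type(diagram.content))  # Login Flow -> Flowchart
    # Shareable URL: zlib-compressed, urlsafe-base64 "pako" payload appended to the viewer prefix.
    print(create_mermaid_live_link(diagram.content))
```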
@@ -130,15 +130,11 @@ class ModelDatabase:
  )

  # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
- GROK_4 = ModelParameters(
-     context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION
- )
+ GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION)

  # Source for Grok 3 max output: https://www.reddit.com/r/grok/comments/1j7209p/exploring_grok_3_beta_output_capacity_a_simple/
  # xAI does not document Grok 3 max output tokens, using the above source as a reference.
- GROK_3 = ModelParameters(
-     context_window=131072, max_output_tokens=16385, tokenizes=TEXT_ONLY
- )
+ GROK_3 = ModelParameters(context_window=131072, max_output_tokens=16385, tokenizes=TEXT_ONLY)

  # Model configuration database
  MODELS: Dict[str, ModelParameters] = {
@@ -193,7 +189,6 @@ class ModelDatabase:
      "claude-3-7-sonnet": ANTHROPIC_37_SERIES,
      "claude-3-7-sonnet-20250219": ANTHROPIC_37_SERIES,
      "claude-3-7-sonnet-latest": ANTHROPIC_37_SERIES,
-     "claude-sonnet-4": ANTHROPIC_SONNET_4_VERSIONED,
      "claude-sonnet-4-0": ANTHROPIC_SONNET_4_VERSIONED,
      "claude-sonnet-4-20250514": ANTHROPIC_SONNET_4_VERSIONED,
      "claude-opus-4": ANTHROPIC_OPUS_4_VERSIONED,
@@ -18,7 +18,7 @@ class AliyunAugmentedLLM(OpenAIAugmentedLLM):
      model=chosen_model,
      systemPrompt=self.instruction,
      parallel_tool_calls=True,
-     max_iterations=10,
+     max_iterations=20,
      use_history=True,
  )

@@ -46,7 +46,7 @@ from mcp_agent.llm.augmented_llm import (
  )
  from mcp_agent.logging.logger import get_logger

- DEFAULT_ANTHROPIC_MODEL = "claude-3-7-sonnet-latest"
+ DEFAULT_ANTHROPIC_MODEL = "claude-sonnet-4-0"


  class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
@@ -28,7 +28,7 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
      model=chosen_model,
      systemPrompt=self.instruction,
      parallel_tool_calls=True,
-     max_iterations=10,
+     max_iterations=20,
      use_history=True,
  )

@@ -85,7 +85,9 @@ class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
      return self._structured_from_multipart(result, model)

  @classmethod
- def convert_message_to_message_param(cls, message: ChatCompletionMessage, **kwargs) -> ChatCompletionAssistantMessageParam:
+ def convert_message_to_message_param(
+     cls, message: ChatCompletionMessage, **kwargs
+ ) -> ChatCompletionAssistantMessageParam:
      """Convert a response object to an input parameter object to allow LLM calls to be chained."""
      if hasattr(message, "reasoning_content"):
          message = copy(message)
@@ -18,7 +18,7 @@ class GoogleOaiAugmentedLLM(OpenAIAugmentedLLM):
      model=chosen_model,
      systemPrompt=self.instruction,
      parallel_tool_calls=False,
-     max_iterations=10,
+     max_iterations=20,
      use_history=True,
  )

@@ -32,7 +32,7 @@ class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
      model=chosen_model, # Will be validated by base class
      systemPrompt=self.instruction,
      parallel_tool_calls=True, # Default based on OpenAI provider
-     max_iterations=10, # Default based on OpenAI provider
+     max_iterations=20, # Default based on OpenAI provider
      use_history=True, # Default based on OpenAI provider
  )

@@ -92,7 +92,7 @@ class TensorZeroAugmentedLLM(AugmentedLLM[Dict[str, Any], Any]):
      systemPrompt=self.instruction,
      maxTokens=4096,
      use_history=True,
-     max_iterations=10, # Max iterations for tool use loop
+     max_iterations=20, # Max iterations for tool use loop
      parallel_tool_calls=True,
  )

@@ -22,7 +22,7 @@ class XAIAugmentedLLM(OpenAIAugmentedLLM):
      model=chosen_model,
      systemPrompt=self.instruction,
      parallel_tool_calls=False,
-     max_iterations=10,
+     max_iterations=20,
      use_history=True,
  )

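Across the Aliyun, DeepSeek, Google (OpenAI-compatible), OpenRouter, TensorZero and xAI providers above, the default tool-use loop cap moves from `max_iterations=10` to `max_iterations=20`. Callers that relied on the lower cap can still pin it per agent; a minimal sketch, assuming `RequestParams` accepts these keywords directly (the same field names appear in the hunks above):

```python
from mcp_agent.llm.augmented_llm import RequestParams

# Hypothetical override restoring the previous cap of 10 tool-use iterations
# for a single agent, rather than relying on the new provider default of 20.
capped = RequestParams(max_iterations=10, use_history=True)
```

Such an object would be passed wherever the factory forwards `request_params=`, for example to `attach_llm` as in the factory hunks earlier in this diff.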
@@ -43,8 +43,11 @@ fast = FastAgent("Orchestrator-Workers")
      model="gpt-4.1",
  )
  # Define the orchestrator to coordinate the other agents
- @fast.orchestrator(
-     name="orchestrate", agents=["finder", "writer", "proofreader"], plan_type="full", model="sonnet"
+ @fast.iterative_planner(
+     name="orchestrate",
+     agents=["finder", "writer", "proofreader"],
+     model="sonnet",
+     plan_iterations=5,
  )
  async def main() -> None:
      async with fast.run() as agent:
@@ -7,6 +7,11 @@ from rich.panel import Panel
  from rich.text import Text

  from mcp_agent import console
+ from mcp_agent.core.mermaid_utils import (
+     create_mermaid_live_link,
+     detect_diagram_type,
+     extract_mermaid_diagrams,
+ )
  from mcp_agent.mcp.common import SEP
  from mcp_agent.mcp.mcp_aggregator import MCPAggregator

@@ -158,29 +163,29 @@ class ConsoleDisplay:
      content = content[:360] + "..."
  console.console.print(content, style="dim", markup=self._markup)

- # Bottom separator with tool list: [tool1] [tool2] ────────
+ # Bottom separator with tool list using pipe separators (matching server style)
  console.console.print()
+
+ # Use existing tool list formatting with pipe separators
  if display_tool_list and len(display_tool_list) > 0:
-     # Truncate tool list if needed (leave space for " " prefix and some separator)
+     # Truncate tool list if needed (leave space for "─| " prefix and " |" suffix)
      max_tool_width = console.console.size.width - 10 # Reserve space for separators
      truncated_tool_list = self._truncate_list_if_needed(display_tool_list, max_tool_width)
-     tool_width = truncated_tool_list.cell_len
-
-     # Calculate how much space is left for separator line on the right
-     total_width = console.console.size.width
-     remaining_width = max(0, total_width - tool_width - 2) # -2 for "─ " prefix
-     right_sep = "─" * remaining_width if remaining_width > 0 else ""
-
-     # Create the separator line: ─ [tools] ────────
-     combined = Text()
-     combined.append("─ ", style="dim")
-     combined.append_text(truncated_tool_list)
-     combined.append(right_sep, style="dim")

-     console.console.print(combined, markup=self._markup)
+     # Create the separator line: ─| [tools] |──────
+     line1 = Text()
+     line1.append("─| ", style="dim")
+     line1.append_text(truncated_tool_list)
+     line1.append(" |", style="dim")
+     remaining = console.console.size.width - line1.cell_len
+     if remaining > 0:
+         line1.append("─" * remaining, style="dim")
  else:
-     # Full separator if no tools
-     console.console.print("─" * console.console.size.width, style="dim")
+     # No tools - continuous bar
+     line1 = Text()
+     line1.append("─" * console.console.size.width, style="dim")
+
+ console.console.print(line1, markup=self._markup)
  console.console.print()

  async def show_tool_update(self, aggregator: MCPAggregator | None, updated_server: str) -> None:
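
The reworked footer embeds the tool names between pipes inside a single dim rule. A standalone sketch of the same layout technique using plain rich (the names and highlight choice are illustrative):

```python
from rich.console import Console
from rich.text import Text

console = Console()
names = ["fetch", "filesystem", "brave_search"]  # placeholder tool names

line = Text()
line.append("─| ", style="dim")
for i, name in enumerate(names):
    if i > 0:
        line.append(" | ", style="dim")
    # Highlight one entry the way the display highlights the selected tool.
    line.append(name, style="magenta" if i == 0 else "dim")
line.append(" |", style="dim")

# Pad the rule out to the full terminal width.
remaining = console.size.width - line.cell_len
if remaining > 0:
    line.append("─" * remaining, style="dim")
console.print(line)
```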
@@ -224,6 +229,8 @@ class ConsoleDisplay:
  def _format_tool_list(self, available_tools, selected_tool_name):
      """Format the list of available tools, highlighting the selected one."""
      display_tool_list = Text()
+     matching_tools = []
+
      for display_tool in available_tools:
          # Handle both OpenAI and Anthropic tool formats
          if isinstance(display_tool, dict):
@@ -248,9 +255,15 @@ class ConsoleDisplay:
          )

          if selected_tool_name.split(SEP)[0] == parts[0]:
-             style = "magenta" if tool_call_name == selected_tool_name else "dim white"
              shortened_name = parts[1] if len(parts[1]) <= 12 else parts[1][:11] + "…"
-             display_tool_list.append(f"[{shortened_name}] ", style)
+             matching_tools.append((shortened_name, tool_call_name))
+
+     # Format with pipe separators instead of brackets
+     for i, (shortened_name, tool_call_name) in enumerate(matching_tools):
+         if i > 0:
+             display_tool_list.append(" | ", style="dim")
+         style = "magenta" if tool_call_name == selected_tool_name else "dim"
+         display_tool_list.append(shortened_name, style)

      return display_tool_list

@@ -379,31 +392,83 @@ class ConsoleDisplay:
      # Handle Rich Text objects directly
      console.console.print(message_text, markup=self._markup)

- # Bottom separator with server list: [server1] [server2] ────────
+ # Bottom separator with server list and diagrams
  console.console.print()
+
+ # Check for mermaid diagrams in the message content
+ diagrams = []
+ if isinstance(message_text, str):
+     diagrams = extract_mermaid_diagrams(message_text)
+
+ # Create server list with pipe separators (no "mcp:" prefix)
+ server_content = Text()
  if display_server_list and len(display_server_list) > 0:
-     # Truncate server list if needed (leave space for "─ " prefix and some separator)
-     max_server_width = console.console.size.width - 10 # Reserve space for separators
-     truncated_server_list = self._truncate_list_if_needed(
-         display_server_list, max_server_width
-     )
-     server_width = truncated_server_list.cell_len
+     # Convert the existing server list to pipe-separated format
+     servers = []
+     if aggregator:
+         for server_name in await aggregator.list_servers():
+             servers.append(server_name)
+
+     # Create pipe-separated server list
+     for i, server_name in enumerate(servers):
+         if i > 0:
+             server_content.append(" | ", style="dim")
+         # Highlight active server, dim inactive ones
+         mcp_server_name = (
+             highlight_namespaced_tool.split(SEP)[0]
+             if SEP in highlight_namespaced_tool
+             else highlight_namespaced_tool
+         )
+         style = "bright_green" if server_name == mcp_server_name else "dim"
+         server_content.append(server_name, style)
+
+ # Create main separator line
+ line1 = Text()
+ if server_content.cell_len > 0:
+     line1.append("─| ", style="dim")
+     line1.append_text(server_content)
+     line1.append(" |", style="dim")
+     remaining = console.console.size.width - line1.cell_len
+     if remaining > 0:
+         line1.append("─" * remaining, style="dim")
+ else:
+     # No servers - continuous bar (no break)
+     line1.append("─" * console.console.size.width, style="dim")

-     # Calculate how much space is left for separator line on the right
-     total_width = console.console.size.width
-     remaining_width = max(0, total_width - server_width - 2) # -2 for "─ " prefix
-     right_sep = "─" * remaining_width if remaining_width > 0 else ""
+ console.console.print(line1, markup=self._markup)

-     # Create the separator line: [servers] ────────
-     combined = Text()
-     combined.append("─ ", style="dim")
-     combined.append_text(truncated_server_list)
-     combined.append(right_sep, style="dim")
+ # Add diagram links in panel if any diagrams found
+ if diagrams:
+     diagram_content = Text()
+     # Add bullet at the beginning
+     diagram_content.append("● ", style="dim")
+
+     for i, diagram in enumerate(diagrams, 1):
+         if i > 1:
+             diagram_content.append(" • ", style="dim")
+
+         # Generate URL
+         url = create_mermaid_live_link(diagram.content)
+
+         # Format: "1 - Title" or "1 - Flowchart" or "Diagram 1"
+         if diagram.title:
+             diagram_content.append(
+                 f"{i} - {diagram.title}", style=f"bright_blue link {url}"
+             )
+         else:
+             # Try to detect diagram type, fallback to "Diagram N"
+             diagram_type = detect_diagram_type(diagram.content)
+             if diagram_type != "Diagram":
+                 diagram_content.append(
+                     f"{i} - {diagram_type}", style=f"bright_blue link {url}"
+                 )
+             else:
+                 diagram_content.append(f"Diagram {i}", style=f"bright_blue link {url}")
+
+     # Display diagrams on a simple new line (more space efficient)
+     console.console.print()
+     console.console.print(diagram_content, markup=self._markup)

-     console.console.print(combined, markup=self._markup)
- else:
-     # Full separator if no servers
-     console.console.print("─" * console.console.size.width, style="dim")
  console.console.print()

  def show_user_message(
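
The clickable diagram entries above rely on rich's `link` style, which emits terminal hyperlinks where the terminal supports them. A tiny standalone sketch (the URL is a placeholder; in the display it comes from `create_mermaid_live_link`):

```python
from rich.console import Console
from rich.text import Text

console = Console()
url = "https://www.mermaidchart.com/play#pako:..."  # placeholder, not a real payload

entry = Text()
entry.append("● ", style="dim")
entry.append("1 - Flowchart", style=f"bright_blue link {url}")  # clickable where supported
console.print(entry)
```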