fast-agent-mcp 0.2.3__py3-none-any.whl → 0.2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/METADATA +13 -9
  2. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/RECORD +42 -40
  3. mcp_agent/__init__.py +2 -2
  4. mcp_agent/agents/agent.py +5 -0
  5. mcp_agent/agents/base_agent.py +152 -36
  6. mcp_agent/agents/workflow/chain_agent.py +9 -13
  7. mcp_agent/agents/workflow/evaluator_optimizer.py +3 -3
  8. mcp_agent/agents/workflow/orchestrator_agent.py +9 -7
  9. mcp_agent/agents/workflow/parallel_agent.py +2 -2
  10. mcp_agent/agents/workflow/router_agent.py +7 -5
  11. mcp_agent/cli/main.py +11 -0
  12. mcp_agent/config.py +29 -7
  13. mcp_agent/context.py +2 -0
  14. mcp_agent/core/{direct_agent_app.py → agent_app.py} +115 -15
  15. mcp_agent/core/direct_factory.py +9 -18
  16. mcp_agent/core/enhanced_prompt.py +3 -3
  17. mcp_agent/core/fastagent.py +218 -49
  18. mcp_agent/core/mcp_content.py +38 -5
  19. mcp_agent/core/prompt.py +70 -8
  20. mcp_agent/core/validation.py +1 -1
  21. mcp_agent/llm/augmented_llm.py +44 -16
  22. mcp_agent/llm/augmented_llm_passthrough.py +3 -1
  23. mcp_agent/llm/model_factory.py +16 -28
  24. mcp_agent/llm/providers/augmented_llm_openai.py +3 -3
  25. mcp_agent/llm/providers/multipart_converter_anthropic.py +8 -8
  26. mcp_agent/llm/providers/multipart_converter_openai.py +9 -9
  27. mcp_agent/mcp/helpers/__init__.py +3 -0
  28. mcp_agent/mcp/helpers/content_helpers.py +116 -0
  29. mcp_agent/mcp/interfaces.py +39 -16
  30. mcp_agent/mcp/mcp_aggregator.py +117 -13
  31. mcp_agent/mcp/prompt_message_multipart.py +29 -22
  32. mcp_agent/mcp/prompt_render.py +18 -15
  33. mcp_agent/mcp/prompt_serialization.py +42 -0
  34. mcp_agent/mcp/prompts/prompt_helpers.py +22 -112
  35. mcp_agent/mcp/prompts/prompt_load.py +51 -3
  36. mcp_agent/mcp_server/agent_server.py +62 -13
  37. mcp_agent/resources/examples/internal/agent.py +2 -2
  38. mcp_agent/resources/examples/internal/fastagent.config.yaml +5 -0
  39. mcp_agent/resources/examples/internal/history_transfer.py +35 -0
  40. mcp_agent/mcp/mcp_agent_server.py +0 -56
  41. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/WHEEL +0 -0
  42. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/entry_points.txt +0 -0
  43. {fast_agent_mcp-0.2.3.dist-info → fast_agent_mcp-0.2.5.dist-info}/licenses/LICENSE +0 -0
mcp_agent/llm/augmented_llm.py

@@ -5,6 +5,7 @@ from typing import (
     Generic,
     List,
     Optional,
+    Tuple,
     Type,
     TypeVar,
     cast,
@@ -38,7 +39,6 @@ from mcp_agent.mcp.interfaces import (
 from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.prompt_render import render_multipart_message
-from mcp_agent.mcp.prompt_serialization import multipart_messages_to_delimited_format
 from mcp_agent.ui.console_display import ConsoleDisplay
 
 # Define type variables locally
@@ -76,12 +76,24 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             ProviderFormatConverter[MessageParamT, MessageT]
         ] = BasicFormatConverter,
         context: Optional["Context"] = None,
+        model: Optional[str] = None,
         **kwargs: dict[str, Any],
     ) -> None:
         """
         Initialize the LLM with a list of server names and an instruction.
         If a name is provided, it will be used to identify the LLM.
         If an agent is provided, all other properties are optional
+
+        Args:
+            agent: Optional Agent that owns this LLM
+            server_names: List of MCP server names to connect to
+            instruction: System prompt for the LLM
+            name: Optional name identifier for the LLM
+            request_params: RequestParams to configure LLM behavior
+            type_converter: Provider-specific format converter class
+            context: Application context
+            model: Optional model name override
+            **kwargs: Additional provider-specific parameters
         """
         # Extract request_params before super() call
         self._init_request_params = request_params
@@ -95,7 +107,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # memory contains provider specific API types.
         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
 
-        self.message_history: List[PromptMessageMultipart] = []
+        self._message_history: List[PromptMessageMultipart] = []
 
         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
@@ -103,6 +115,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Initialize default parameters
         self.default_request_params = self._initialize_default_params(kwargs)
 
+        # Apply model override if provided
+        if model:
+            self.default_request_params.model = model
+
         # Merge with provided params if any
         if self._init_request_params:
             self.default_request_params = self._merge_request_params(
@@ -127,17 +143,17 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         prompt: List[PromptMessageMultipart],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> ModelT | None:
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
         try:
             result: PromptMessageMultipart = await self.generate(prompt, request_params)
             json_data = from_json(result.first_text(), allow_partial=True)
             validated_model = model.model_validate(json_data)
-            return cast("ModelT", validated_model)
+            return cast("ModelT", validated_model), Prompt.assistant(json_data)
         except Exception as e:
             logger = get_logger(__name__)
             logger.error(f"Failed to parse structured response: {str(e)}")
-            return None
+            return None, Prompt.assistant(f"Failed to parse structured response: {str(e)}")
 
     async def generate(
         self,
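With structured() now returning a two-tuple, callers keep the assistant message even when validation fails. A minimal consumption sketch; the WeatherReport model and the llm fixture are illustrative, not part of this diff:

from pydantic import BaseModel

from mcp_agent.core.prompt import Prompt


class WeatherReport(BaseModel):
    city: str
    temperature_c: float


async def fetch_report(llm) -> None:
    # 0.2.5: structured() returns (parsed_model_or_None, assistant_message)
    report, assistant_message = await llm.structured(
        [Prompt.user("Report the weather in Lisbon as JSON")],
        WeatherReport,
    )
    if report is None:
        # On failure, the second element carries the error text
        print("parse failed:", assistant_message.first_text())
    else:
        print(report.city, report.temperature_c)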
@@ -158,7 +174,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             )
             return Prompt.assistant(f"History saved to {filename}")
 
-        self.message_history.extend(multipart_messages)
+        self._message_history.extend(multipart_messages)
 
         if multipart_messages[-1].role == "user":
             self.show_user_message(
@@ -171,12 +187,12 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             multipart_messages, request_params
         )
 
-        self.message_history.append(assistant_response)
+        self._message_history.append(assistant_response)
         return assistant_response
 
     def chat_turn(self) -> int:
         """Return the current chat turn number"""
-        return 1 + sum(1 for message in self.message_history if message.role == "assistant")
+        return 1 + sum(1 for message in self._message_history if message.role == "assistant")
 
     def _merge_request_params(
         self, default_params: RequestParams, provided_params: RequestParams
@@ -418,16 +434,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def _save_history(self, filename: str) -> None:
         """
-        Save the Message History to a file in a simple delimeted format.
+        Save the Message History to a file in a format determined by the file extension.
+
+        Uses JSON format for .json files (MCP SDK compatible format) and
+        delimited text format for other extensions.
         """
-        # Convert to delimited format
-        delimited_content = multipart_messages_to_delimited_format(
-            self.message_history,
-        )
+        from mcp_agent.mcp.prompt_serialization import save_messages_to_file
 
-        # Write to file
-        with open(filename, "w", encoding="utf-8") as f:
-            f.write("\n\n".join(delimited_content))
+        # Save messages using the unified save function that auto-detects format
+        save_messages_to_file(self._message_history, filename)
 
     @abstractmethod
     async def _apply_prompt_provider_specific(
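The replacement saver picks the output format from the filename, as the new docstring describes. A small sketch of direct use, assuming only the save_messages_to_file import path shown above:

from mcp_agent.core.prompt import Prompt
from mcp_agent.mcp.prompt_serialization import save_messages_to_file

messages = [
    Prompt.user("What is the capital of France?"),
    Prompt.assistant("Paris."),
]

# .json -> MCP SDK-compatible JSON; any other extension -> delimited text
save_messages_to_file(messages, "history.json")
save_messages_to_file(messages, "history.txt")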
@@ -448,3 +463,16 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         String representation of the assistant's response if generated,
         or the last assistant message in the prompt
         """
+
+    @property
+    def message_history(self) -> List[PromptMessageMultipart]:
+        """
+        Return the agent's message history as PromptMessageMultipart objects.
+
+        This history can be used to transfer state between agents or for
+        analysis and debugging purposes.
+
+        Returns:
+            List of PromptMessageMultipart objects representing the conversation history
+        """
+        return self._message_history
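The new read-only property pairs with the examples/internal/history_transfer.py example added in this release: one agent's conversation can seed another. A hedged sketch; the agent fixtures are illustrative:

async def transfer_history(agent_one, agent_two) -> None:
    await agent_one.send("Summarise the 0.2.5 release notes")

    # message_history is a List[PromptMessageMultipart], which is exactly
    # what generate() accepts, so a transfer is a single call
    await agent_two.generate(agent_one.message_history)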
mcp_agent/llm/augmented_llm_passthrough.py

@@ -143,7 +143,9 @@ class PassthroughLLM(AugmentedLLM):
 
         # TODO -- improve when we support Audio/Multimodal gen
         if self.is_tool_call(last_message):
-            return Prompt.assistant(await self.generate_str(last_message.first_text()))
+            result = Prompt.assistant(await self.generate_str(last_message.first_text()))
+            await self.show_assistant_message(result.first_text())
+            return result
 
         if last_message.first_text().startswith(FIXED_RESPONSE_INDICATOR):
             self._fixed_response = (
mcp_agent/llm/model_factory.py

@@ -103,7 +103,7 @@ class ModelFactory:
         "sonnet": "claude-3-7-sonnet-latest",
         "sonnet35": "claude-3-5-sonnet-latest",
         "sonnet37": "claude-3-7-sonnet-latest",
-        "claude": "claude-3-5-sonnet-latest",
+        "claude": "claude-3-7-sonnet-latest",
         "haiku": "claude-3-5-haiku-latest",
         "haiku3": "claude-3-haiku-20240307",
         "haiku35": "claude-3-5-haiku-latest",
@@ -186,37 +186,25 @@ class ModelFactory:
         else:
             llm_class = cls.PROVIDER_CLASSES[config.provider]
 
-        # Create a factory function matching the attach_llm protocol
-        def factory(agent: Agent, **kwargs) -> LLMClass:
-            # Create merged params with parsed model name
-            factory_params = request_params.model_copy() if request_params else RequestParams()
-            factory_params.model = config.model_name  # Use the parsed model name, not the alias
-
-            # Merge with any provided default_request_params
-            if "default_request_params" in kwargs and kwargs["default_request_params"]:
-                params_dict = factory_params.model_dump()
-                params_dict.update(kwargs["default_request_params"].model_dump(exclude_unset=True))
-                factory_params = RequestParams(**params_dict)
-                factory_params.model = (
-                    config.model_name
-                )  # Ensure parsed model name isn't overwritten
-
-            # Forward all keyword arguments to LLM constructor
-            llm_args = {
-                "agent": agent,
-                "model": config.model_name,
-                "request_params": factory_params,
-                "name": kwargs.get("name"),
-            }
+        # Create a factory function matching the updated attach_llm protocol
+        def factory(
+            agent: Agent, request_params: Optional[RequestParams] = None, **kwargs
+        ) -> AugmentedLLMProtocol:
+            # Create base params with parsed model name
+            base_params = RequestParams()
+            base_params.model = config.model_name  # Use the parsed model name, not the alias
 
             # Add reasoning effort if available
             if config.reasoning_effort:
-                llm_args["reasoning_effort"] = config.reasoning_effort.value
+                kwargs["reasoning_effort"] = config.reasoning_effort.value
 
-            # Forward all other kwargs (including verb)
-            for key, value in kwargs.items():
-                if key not in ["agent", "default_request_params", "name"]:
-                    llm_args[key] = value
+            # Forward all arguments to LLM constructor
+            llm_args = {
+                "agent": agent,
+                "model": config.model_name,
+                "request_params": request_params,
+                **kwargs,
+            }
 
             llm: AugmentedLLMProtocol = llm_class(**llm_args)
             return llm
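The factory now receives request_params as an explicit keyword argument instead of a default_request_params entry buried in kwargs. A hedged sketch of driving the factory directly; the create_factory method name and the maxTokens field are assumptions, not shown in this hunk:

from mcp_agent.core.request_params import RequestParams
from mcp_agent.llm.model_factory import ModelFactory

# Resolve an alias such as "haiku" into a provider class and parsed model name
factory = ModelFactory.create_factory("haiku")


def attach(agent):
    # request_params is now a first-class factory argument
    return factory(agent, request_params=RequestParams(maxTokens=1024))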
mcp_agent/llm/providers/augmented_llm_openai.py

@@ -1,5 +1,5 @@
 import os
-from typing import List, Type
+from typing import List, Tuple, Type
 
 from mcp.types import (
     CallToolRequest,
@@ -377,7 +377,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         prompt: List[PromptMessageMultipart],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> ModelT | None:
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """
         Apply the prompt and return the result as a Pydantic model.
 
@@ -432,7 +432,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
             parsed_result = response[0].choices[0].message
             logger.debug("Successfully used OpenAI beta parse feature for structured output")
-            return parsed_result.parsed
+            return parsed_result.parsed, Prompt.assistant(parsed_result.content)
 
         except (ImportError, AttributeError, NotImplementedError) as e:
             # Beta feature not available, log and continue to fallback
mcp_agent/llm/providers/multipart_converter_anthropic.py

@@ -24,13 +24,7 @@ from mcp.types import (
 )
 
 from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.mime_utils import (
-    guess_mime_type,
-    is_image_mime_type,
-    is_text_mime_type,
-)
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.mcp.prompts.prompt_helpers import (
+from mcp_agent.mcp.helpers.content_helpers import (
     get_image_data,
     get_resource_uri,
     get_text,
@@ -38,6 +32,12 @@ from mcp_agent.mcp.prompts.prompt_helpers import (
     is_resource_content,
     is_text_content,
 )
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_image_mime_type,
+    is_text_mime_type,
+)
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.resource_utils import extract_title_from_uri
 
 _logger = get_logger("multipart_converter_anthropic")
@@ -450,4 +450,4 @@ class AnthropicConverter:
         # Add separate blocks directly to the message
         content_blocks.extend(separate_blocks)
 
-        return MessageParam(role="user", content=content_blocks)
+        return MessageParam(role="user", content=content_blocks)
mcp_agent/llm/providers/multipart_converter_openai.py

@@ -9,14 +9,7 @@ from mcp.types import (
 )
 
 from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.mime_utils import (
-    guess_mime_type,
-    is_image_mime_type,
-    is_text_mime_type,
-)
-from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp_agent.mcp.prompts.prompt_helpers import (
-    MessageContent,
+from mcp_agent.mcp.helpers.content_helpers import (
     get_image_data,
     get_resource_uri,
     get_text,
@@ -24,6 +17,13 @@ from mcp_agent.mcp.prompts.prompt_helpers import (
     is_resource_content,
     is_text_content,
 )
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_image_mime_type,
+    is_text_mime_type,
+)
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.mcp.prompts.prompt_helpers import MessageContent
 from mcp_agent.mcp.resource_utils import extract_title_from_uri
 
 _logger = get_logger("multipart_converter_openai")
@@ -462,4 +462,4 @@ class OpenAIConverter:
         # Single message case (text-only)
         messages.append(converted)
 
-        return messages
+        return messages
mcp_agent/mcp/helpers/__init__.py

@@ -0,0 +1,3 @@
+"""
+Helper modules for working with MCP content.
+"""
mcp_agent/mcp/helpers/content_helpers.py

@@ -0,0 +1,116 @@
+"""
+Helper functions for working with content objects.
+
+These utilities simplify extracting content from content structures
+without repetitive type checking.
+"""
+
+from typing import Optional, Union
+
+from mcp.types import (
+    BlobResourceContents,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
+    TextResourceContents,
+)
+
+
+def get_text(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract text content from a content object if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The text content as a string or None if not a text content
+    """
+    if isinstance(content, TextContent):
+        return content.text
+
+    if isinstance(content, TextResourceContents):
+        return content.text
+
+    if isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, TextResourceContents):
+            return content.resource.text
+
+    return None
+
+
+def get_image_data(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract image data from a content object if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The image data as a base64 string or None if not an image content
+    """
+    if isinstance(content, ImageContent):
+        return content.data
+
+    if isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, BlobResourceContents):
+            # This assumes the blob might be an image, which isn't always true
+            # Consider checking the mimeType if needed
+            return content.resource.blob
+
+    return None
+
+
+def get_resource_uri(content: Union[TextContent, ImageContent, EmbeddedResource]) -> Optional[str]:
+    """
+    Extract resource URI from an EmbeddedResource if available.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        The resource URI as a string or None if not an embedded resource
+    """
+    if isinstance(content, EmbeddedResource):
+        return str(content.resource.uri)
+
+    return None
+
+
+def is_text_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is text content.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is TextContent, False otherwise
+    """
+    return isinstance(content, TextContent) or isinstance(content, TextResourceContents)
+
+
+def is_image_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is image content.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is ImageContent, False otherwise
+    """
+    return isinstance(content, ImageContent)
+
+
+def is_resource_content(content: Union[TextContent, ImageContent, EmbeddedResource]) -> bool:
+    """
+    Check if the content is an embedded resource.
+
+    Args:
+        content: A content object (TextContent, ImageContent, or EmbeddedResource)
+
+    Returns:
+        True if the content is EmbeddedResource, False otherwise
+    """
+    return isinstance(content, EmbeddedResource)
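A short usage sketch for the new helpers; the content values are illustrative:

from mcp.types import EmbeddedResource, TextContent, TextResourceContents

from mcp_agent.mcp.helpers.content_helpers import (
    get_resource_uri,
    get_text,
    is_text_content,
)

text_block = TextContent(type="text", text="hello")
assert is_text_content(text_block)
assert get_text(text_block) == "hello"

resource = EmbeddedResource(
    type="resource",
    resource=TextResourceContents(uri="resource://docs/readme", text="# Readme"),
)
assert get_text(resource) == "# Readme"
print(get_resource_uri(resource))  # resource://docs/readme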
mcp_agent/mcp/interfaces.py

@@ -10,6 +10,7 @@ from typing import (
     Callable,
     Dict,
     List,
+    Mapping,
     Optional,
     Protocol,
     Tuple,
@@ -21,10 +22,10 @@ from typing import (
 
 from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
 from deprecated import deprecated
-from mcp import ClientSession, GetPromptResult, ReadResourceResult
+from mcp import ClientSession
+from mcp.types import GetPromptResult, Prompt, PromptMessage, ReadResourceResult
 from pydantic import BaseModel
 
-from mcp_agent.core.prompt import Prompt
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
@@ -119,44 +120,66 @@ class AugmentedLLMProtocol(Protocol):
         """
         ...
 
+    @property
+    def message_history(self) -> List[PromptMessageMultipart]:
+        """
+        Return the LLM's message history as PromptMessageMultipart objects.
+
+        Returns:
+            List of PromptMessageMultipart objects representing the conversation history
+        """
+        ...
+
 
 class AgentProtocol(AugmentedLLMProtocol, Protocol):
     """Protocol defining the standard agent interface"""
 
     name: str
 
-    async def __call__(self, message: Union[str, PromptMessageMultipart] | None = None) -> str:
+    async def __call__(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
         """Make the agent callable for sending messages directly."""
         ...
 
-    async def send(self, message: Union[str, PromptMessageMultipart]) -> str:
+    async def send(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
         """Send a message to the agent and get a response"""
         ...
 
-    async def prompt(self, default_prompt: str = "") -> str:
-        """Start an interactive prompt session with the agent"""
-        ...
-
     async def apply_prompt(self, prompt_name: str, arguments: Dict[str, str] | None = None) -> str:
         """Apply an MCP prompt template by name"""
         ...
 
-    async def get_prompt(self, prompt_name: str) -> GetPromptResult: ...
+    async def get_prompt(
+        self,
+        prompt_name: str,
+        arguments: Dict[str, str] | None = None,
+        server_name: str | None = None,
+    ) -> GetPromptResult: ...
 
-    async def list_prompts(self, server_name: str | None) -> Dict[str, List[Prompt]]: ...
+    async def list_prompts(self, server_name: str | None = None) -> Mapping[str, List[Prompt]]: ...
 
-    async def get_resource(self, server_name: str, resource_uri: str) -> ReadResourceResult: ...
+    async def list_resources(self, server_name: str | None = None) -> Mapping[str, List[str]]: ...
+
+    async def get_resource(
+        self, resource_uri: str, server_name: str | None = None
+    ) -> ReadResourceResult:
+        """Get a resource from a specific server or search all servers"""
+        ...
 
     @deprecated
-    async def generate_str(self, message: str, request_params: RequestParams | None) -> str:
-        """Generate a response. Deprecated: please use send instead"""
+    async def generate_str(self, message: str, request_params: RequestParams | None = None) -> str:
+        """Generate a response. Deprecated: Use send(), generate() or structured() instead"""
+        ...
+
+    @deprecated
+    async def prompt(self, default_prompt: str = "") -> str:
+        """Start an interactive prompt session with the agent. Deprecated. Use agent_app.interactive() instead."""
         ...
 
     async def with_resource(
         self,
-        prompt_content: Union[str, PromptMessageMultipart],
-        server_name: str,
-        resource_name: str,
+        prompt_content: Union[str, PromptMessage, PromptMessageMultipart],
+        resource_uri: str,
+        server_name: str | None = None,
    ) -> str:
         """Send a message with an attached MCP resource"""
         ...
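A hedged sketch of the revised call shapes; the agent fixture and URIs are illustrative. get_resource now takes the URI first with an optional server_name, and the list methods default to searching every attached server:

async def inspect(agent) -> None:
    # Omit server_name to query all attached servers
    prompts = await agent.list_prompts()       # Mapping[server name -> List[Prompt]]
    resources = await agent.list_resources()   # Mapping[server name -> List[str]]
    print(prompts.keys(), resources.keys())

    # URI-first signature; server_name narrows the search when given
    anywhere = await agent.get_resource("resource://docs/readme")
    scoped = await agent.get_resource("resource://docs/readme", server_name="docs")
    print(anywhere, scoped)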