fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (169)
  1. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +61 -415
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +15 -19
  14. mcp_agent/cli/commands/bootstrap.py +19 -38
  15. mcp_agent/cli/commands/config.py +4 -4
  16. mcp_agent/cli/commands/setup.py +7 -14
  17. mcp_agent/cli/main.py +7 -10
  18. mcp_agent/cli/terminal.py +3 -3
  19. mcp_agent/config.py +25 -40
  20. mcp_agent/context.py +12 -21
  21. mcp_agent/context_dependent.py +3 -5
  22. mcp_agent/core/agent_types.py +10 -7
  23. mcp_agent/core/direct_agent_app.py +179 -0
  24. mcp_agent/core/direct_decorators.py +443 -0
  25. mcp_agent/core/direct_factory.py +476 -0
  26. mcp_agent/core/enhanced_prompt.py +23 -55
  27. mcp_agent/core/exceptions.py +8 -8
  28. mcp_agent/core/fastagent.py +145 -371
  29. mcp_agent/core/interactive_prompt.py +424 -0
  30. mcp_agent/core/mcp_content.py +17 -17
  31. mcp_agent/core/prompt.py +6 -9
  32. mcp_agent/core/request_params.py +6 -3
  33. mcp_agent/core/validation.py +92 -18
  34. mcp_agent/executor/decorator_registry.py +9 -17
  35. mcp_agent/executor/executor.py +8 -17
  36. mcp_agent/executor/task_registry.py +2 -4
  37. mcp_agent/executor/temporal.py +19 -41
  38. mcp_agent/executor/workflow.py +3 -5
  39. mcp_agent/executor/workflow_signal.py +15 -21
  40. mcp_agent/human_input/handler.py +4 -7
  41. mcp_agent/human_input/types.py +2 -3
  42. mcp_agent/llm/__init__.py +2 -0
  43. mcp_agent/llm/augmented_llm.py +450 -0
  44. mcp_agent/llm/augmented_llm_passthrough.py +162 -0
  45. mcp_agent/llm/augmented_llm_playback.py +83 -0
  46. mcp_agent/llm/memory.py +103 -0
  47. mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
  48. mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
  49. mcp_agent/llm/providers/__init__.py +8 -0
  50. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
  51. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
  52. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  53. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
  54. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
  55. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
  56. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
  57. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
  58. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
  59. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
  60. mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
  61. mcp_agent/llm/sampling_format_converter.py +37 -0
  62. mcp_agent/logging/events.py +1 -5
  63. mcp_agent/logging/json_serializer.py +7 -6
  64. mcp_agent/logging/listeners.py +20 -23
  65. mcp_agent/logging/logger.py +17 -19
  66. mcp_agent/logging/rich_progress.py +10 -8
  67. mcp_agent/logging/tracing.py +4 -6
  68. mcp_agent/logging/transport.py +22 -22
  69. mcp_agent/mcp/gen_client.py +1 -3
  70. mcp_agent/mcp/interfaces.py +117 -110
  71. mcp_agent/mcp/logger_textio.py +97 -0
  72. mcp_agent/mcp/mcp_agent_client_session.py +7 -7
  73. mcp_agent/mcp/mcp_agent_server.py +8 -8
  74. mcp_agent/mcp/mcp_aggregator.py +102 -143
  75. mcp_agent/mcp/mcp_connection_manager.py +20 -27
  76. mcp_agent/mcp/prompt_message_multipart.py +68 -16
  77. mcp_agent/mcp/prompt_render.py +77 -0
  78. mcp_agent/mcp/prompt_serialization.py +30 -48
  79. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  80. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  81. mcp_agent/mcp/prompts/prompt_load.py +109 -0
  82. mcp_agent/mcp/prompts/prompt_server.py +155 -195
  83. mcp_agent/mcp/prompts/prompt_template.py +35 -66
  84. mcp_agent/mcp/resource_utils.py +7 -14
  85. mcp_agent/mcp/sampling.py +17 -17
  86. mcp_agent/mcp_server/agent_server.py +13 -17
  87. mcp_agent/mcp_server_registry.py +13 -22
  88. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
  89. mcp_agent/resources/examples/in_dev/slides.py +110 -0
  90. mcp_agent/resources/examples/internal/agent.py +6 -3
  91. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  92. mcp_agent/resources/examples/internal/job.py +2 -1
  93. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  94. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  95. mcp_agent/resources/examples/internal/sizer.py +2 -1
  96. mcp_agent/resources/examples/internal/social.py +2 -1
  97. mcp_agent/resources/examples/prompting/agent.py +2 -1
  98. mcp_agent/resources/examples/prompting/image_server.py +4 -8
  99. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  100. mcp_agent/ui/console_display.py +16 -20
  101. fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
  102. mcp_agent/core/agent_app.py +0 -646
  103. mcp_agent/core/agent_utils.py +0 -71
  104. mcp_agent/core/decorators.py +0 -455
  105. mcp_agent/core/factory.py +0 -463
  106. mcp_agent/core/proxies.py +0 -269
  107. mcp_agent/core/types.py +0 -24
  108. mcp_agent/eval/__init__.py +0 -0
  109. mcp_agent/mcp/stdio.py +0 -111
  110. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  111. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  112. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  113. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  114. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  115. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  116. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  117. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
  118. mcp_agent/resources/examples/researcher/researcher.py +0 -38
  119. mcp_agent/resources/examples/workflows/chaining.py +0 -44
  120. mcp_agent/resources/examples/workflows/evaluator.py +0 -78
  121. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  122. mcp_agent/resources/examples/workflows/human_input.py +0 -25
  123. mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
  124. mcp_agent/resources/examples/workflows/parallel.py +0 -78
  125. mcp_agent/resources/examples/workflows/router.py +0 -53
  126. mcp_agent/resources/examples/workflows/sse.py +0 -23
  127. mcp_agent/telemetry/__init__.py +0 -0
  128. mcp_agent/telemetry/usage_tracking.py +0 -18
  129. mcp_agent/workflows/__init__.py +0 -0
  130. mcp_agent/workflows/embedding/__init__.py +0 -0
  131. mcp_agent/workflows/embedding/embedding_base.py +0 -61
  132. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  133. mcp_agent/workflows/embedding/embedding_openai.py +0 -46
  134. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  135. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
  136. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  137. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
  138. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
  139. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
  140. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
  141. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
  142. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  143. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
  144. mcp_agent/workflows/llm/__init__.py +0 -0
  145. mcp_agent/workflows/llm/augmented_llm.py +0 -753
  146. mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
  147. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
  148. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  149. mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
  150. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  151. mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
  152. mcp_agent/workflows/parallel/__init__.py +0 -0
  153. mcp_agent/workflows/parallel/fan_in.py +0 -350
  154. mcp_agent/workflows/parallel/fan_out.py +0 -187
  155. mcp_agent/workflows/parallel/parallel_llm.py +0 -166
  156. mcp_agent/workflows/router/__init__.py +0 -0
  157. mcp_agent/workflows/router/router_base.py +0 -368
  158. mcp_agent/workflows/router/router_embedding.py +0 -240
  159. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  160. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  161. mcp_agent/workflows/router/router_llm.py +0 -320
  162. mcp_agent/workflows/swarm/__init__.py +0 -0
  163. mcp_agent/workflows/swarm/swarm.py +0 -320
  164. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  165. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  166. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  167. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  168. {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  169. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
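The headline change in 0.2.0 is a package reorganisation: the mcp_agent.workflows.llm tree moves to mcp_agent.llm, provider-specific modules land in mcp_agent.llm.providers, and the workflow patterns (chain, parallel, router, orchestrator, evaluator-optimizer) are rebuilt as agents under mcp_agent.agents.workflow. A sketch of how downstream imports shift, derived only from the rename entries above; whether 0.2.0 keeps compatibility shims for the old paths is not visible in this diff, so the old forms are assumed removed (class names other than AugmentedLLM and AnthropicAugmentedLLM, both visible in the diff below, are assumptions):

# Import-path migration sketch, 0.1.12 -> 0.2.0.
# 0.1.12 (old locations, assumed removed):
#   from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
#   from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
#   from mcp_agent.workflows.llm.model_factory import ModelFactory

# 0.2.0 (new locations, per the renames above):
from mcp_agent.llm.augmented_llm import AugmentedLLM
from mcp_agent.llm.model_factory import ModelFactory
from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM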
--- a/mcp_agent/workflows/llm/augmented_llm_anthropic.py
+++ b/mcp_agent/llm/providers/augmented_llm_anthropic.py
@@ -1,15 +1,17 @@
 import os
-from typing import List, Type, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Type
 
-from mcp_agent.workflows.llm.providers.multipart_converter_anthropic import (
+from mcp_agent.core.prompt import Prompt
+from mcp_agent.llm.providers.multipart_converter_anthropic import (
     AnthropicConverter,
 )
-from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
+from mcp_agent.llm.providers.sampling_converter_anthropic import (
     AnthropicSamplingConverter,
 )
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 if TYPE_CHECKING:
-    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+    from mcp import ListToolsResult
 
 
 from anthropic import Anthropic, AuthenticationError
@@ -20,21 +22,21 @@ from anthropic.types import (
     TextBlockParam,
     ToolParam,
     ToolUseBlockParam,
+    Usage,
 )
 from mcp.types import (
-    CallToolRequestParams,
     CallToolRequest,
+    CallToolRequestParams,
 )
 from pydantic_core import from_json
+from rich.text import Text
 
-from mcp_agent.workflows.llm.augmented_llm import (
+from mcp_agent.core.exceptions import ProviderKeyError
+from mcp_agent.llm.augmented_llm import (
     AugmentedLLM,
     ModelT,
     RequestParams,
 )
-from mcp_agent.core.exceptions import ProviderKeyError
-from rich.text import Text
-
 from mcp_agent.logging.logger import get_logger
 
 DEFAULT_ANTHROPIC_MODEL = "claude-3-7-sonnet-latest"
@@ -48,7 +50,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     selecting appropriate tools, and determining what information to retain.
     """
 
-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args, **kwargs) -> None:
         self.provider = "Anthropic"
         # Initialize logger - keep it simple without name reference
         self.logger = get_logger(__name__)
@@ -67,9 +69,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             use_history=True,
         )
 
-    async def generate(
+    def _base_url(self) -> str:
+        return self.context.config.anthropic.base_url if self.context.config.anthropic else None
+
+    async def generate_internal(
         self,
-        message,
+        message_param,
         request_params: RequestParams | None = None,
     ):
         """
@@ -78,46 +83,42 @@
         """
 
         api_key = self._api_key(self.context.config)
+        base_url = self._base_url()
+        if base_url and base_url.endswith("/v1"):
+            base_url = base_url.rstrip("/v1")
+
         try:
-            anthropic = Anthropic(api_key=api_key)
+            anthropic = Anthropic(api_key=api_key, base_url=base_url)
             messages: List[MessageParam] = []
             params = self.get_request_params(request_params)
         except AuthenticationError as e:
             raise ProviderKeyError(
                 "Invalid Anthropic API key",
-                "The configured Anthropic API key was rejected.\n"
-                "Please check that your API key is valid and not expired.",
+                "The configured Anthropic API key was rejected.\nPlease check that your API key is valid and not expired.",
             ) from e
 
         # Always include prompt messages, but only include conversation history
         # if use_history is True
         messages.extend(self.history.get(include_history=params.use_history))
 
-        if isinstance(message, str):
-            messages.append({"role": "user", "content": message})
-        elif isinstance(message, list):
-            messages.extend(message)
-        else:
-            messages.append(message)
+        messages.append(message_param)
 
-        response = await self.aggregator.list_tools()
+        tool_list: ListToolsResult = await self.aggregator.list_tools()
         available_tools: List[ToolParam] = [
-            {
-                "name": tool.name,
-                "description": tool.description,
-                "input_schema": tool.inputSchema,
-            }
-            for tool in response.tools
+            ToolParam(
+                name=tool.name,
+                description=tool.description or "",
+                input_schema=tool.inputSchema,
+            )
+            for tool in tool_list.tools
         ]
 
         responses: List[Message] = []
-        model = await self.select_model(params)
-        chat_turn = (len(messages) + 1) // 2
-        self.show_user_message(str(message), model, chat_turn)
+
+        model = self.default_request_params.model
 
         for i in range(params.max_iterations):
-            chat_turn = (len(messages) + 1) // 2
-            self._log_chat_progress(chat_turn, model=model)
+            self._log_chat_progress(self.chat_turn(), model=model)
             arguments = {
                 "model": model,
                 "messages": messages,
@@ -134,17 +135,14 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
             self.logger.debug(f"{arguments}")
 
-            executor_result = await self.executor.execute(
-                anthropic.messages.create, **arguments
-            )
+            executor_result = await self.executor.execute(anthropic.messages.create, **arguments)
 
             response = executor_result[0]
 
             if isinstance(response, AuthenticationError):
                 raise ProviderKeyError(
                     "Invalid Anthropic API key",
-                    "The configured Anthropic API key was rejected.\n"
-                    "Please check that your API key is valid and not expired.",
+                    "The configured Anthropic API key was rejected.\nPlease check that your API key is valid and not expired.",
                 ) from response
             elif isinstance(response, BaseException):
                 error_details = str(response)
@@ -154,13 +152,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                 if hasattr(response, "status_code") and hasattr(response, "response"):
                     try:
                         error_json = response.response.json()
-                        error_details = (
-                            f"Error code: {response.status_code} - {error_json}"
-                        )
+                        error_details = f"Error code: {response.status_code} - {error_json}"
                     except:  # noqa: E722
-                        error_details = (
-                            f"Error code: {response.status_code} - {str(response)}"
-                        )
+                        error_details = f"Error code: {response.status_code} - {str(response)}"
 
                 # Convert other errors to text response
                 error_message = f"Error during generation: {error_details}"
@@ -171,7 +165,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                     type="message",
                     content=[TextBlock(type="text", text=error_message)],
                     stop_reason="end_turn",  # Must be one of the allowed values
-                    usage={"input_tokens": 0, "output_tokens": 0},  # Required field
+                    usage=Usage(input_tokens=0, output_tokens=0),  # Required field
                 )
 
             self.logger.debug(
@@ -193,9 +187,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
                 await self.show_assistant_message(message_text)
 
-                self.logger.debug(
-                    f"Iteration {i}: Stopping because finish_reason is 'end_turn'"
-                )
+                self.logger.debug(f"Iteration {i}: Stopping because finish_reason is 'end_turn'")
                 break
             elif response.stop_reason == "stop_sequence":
                 # We have reached a stop sequence
@@ -206,9 +198,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             elif response.stop_reason == "max_tokens":
                 # We have reached the max tokens limit
 
-                self.logger.debug(
-                    f"Iteration {i}: Stopping because finish_reason is 'max_tokens'"
-                )
+                self.logger.debug(f"Iteration {i}: Stopping because finish_reason is 'max_tokens'")
                 if params.maxTokens is not None:
                     message_text = Text(
                         f"the assistant has reached the maximum token limit ({params.maxTokens})",
@@ -255,9 +245,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                         self.show_tool_call(available_tools, tool_name, tool_args)
                         tool_call_request = CallToolRequest(
                             method="tools/call",
-                            params=CallToolRequestParams(
-                                name=tool_name, arguments=tool_args
-                            ),
+                            params=CallToolRequestParams(name=tool_name, arguments=tool_args),
                         )
                         # TODO -- support MCP isError etc.
                         result = await self.call_tool(
@@ -268,9 +256,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                         # Add each result to our collection
                         tool_results.append((tool_use_id, result))
 
-                    messages.append(
-                        AnthropicConverter.create_tool_results_message(tool_results)
-                    )
+                    messages.append(AnthropicConverter.create_tool_results_message(tool_results))
 
         # Only save the new conversation messages to history if use_history is true
        # Keep the prompt messages separate
@@ -311,7 +297,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
     async def generate_str(
         self,
-        message,
+        message_param,
        request_params: RequestParams | None = None,
     ) -> str:
         """
@@ -319,16 +305,10 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         The default implementation uses Claude as the LLM.
         Override this method to use a different LLM.
 
-        Special commands:
-        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
-          in MCP prompt format with user/assistant delimiters.
         """
-        # Check if this is a special command to save history
-        if isinstance(message, str) and message.startswith("***SAVE_HISTORY "):
-            return await self._save_history_to_file(message)
 
-        responses: List[Message] = await self.generate(
-            message=message,
+        responses: List[Message] = await self.generate_internal(
+            message_param=message_param,
             request_params=request_params,
         )
 
@@ -351,114 +331,36 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         # Join all collected text
         return "\n".join(final_text)
 
-    async def generate_prompt(
-        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
-    ) -> str:
-        return await self.generate_str(
-            AnthropicConverter.convert_to_anthropic(prompt), request_params
-        )
-
-    async def _apply_prompt_template_provider_specific(
+    async def _apply_prompt_provider_specific(
         self,
         multipart_messages: List["PromptMessageMultipart"],
         request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Anthropic-specific implementation of apply_prompt_template that handles
-        multimodal content natively.
-
-        Args:
-            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
-
-        Returns:
-            String representation of the assistant's response if generated,
-            or the last assistant message in the prompt
-        """
+    ) -> PromptMessageMultipart:
         # Check the last message role
         last_message = multipart_messages[-1]
 
         # Add all previous messages to history (or all messages if last is from assistant)
         messages_to_add = (
-            multipart_messages[:-1]
-            if last_message.role == "user"
-            else multipart_messages
+            multipart_messages[:-1] if last_message.role == "user" else multipart_messages
         )
         converted = []
         for msg in messages_to_add:
             converted.append(AnthropicConverter.convert_to_anthropic(msg))
+
         self.history.extend(converted, is_prompt=True)
 
         if last_message.role == "user":
-            # For user messages: Generate response to the last one
-            self.logger.debug(
-                "Last message in prompt is from user, generating assistant response"
-            )
+            self.logger.debug("Last message in prompt is from user, generating assistant response")
             message_param = AnthropicConverter.convert_to_anthropic(last_message)
-            return await self.generate_str(message_param, request_params)
+            return Prompt.assistant(await self.generate_str(message_param, request_params))
         else:
             # For assistant messages: Return the last message content as text
-            self.logger.debug(
-                "Last message in prompt is from assistant, returning it directly"
-            )
-            return str(last_message)
-
-    async def _save_history_to_file(self, command: str) -> str:
-        """
-        Save the conversation history to a file in MCP prompt format.
-
-        Args:
-            command: The command string, expected format: "***SAVE_HISTORY <filename.md>"
-
-        Returns:
-            Success or error message
-        """
-        try:
-            # Extract the filename from the command
-            parts = command.split(" ", 1)
-            if len(parts) != 2 or not parts[1].strip():
-                return "Error: Invalid format. Expected '***SAVE_HISTORY <filename.md>'"
-
-            filename = parts[1].strip()
-
-            # Get all messages from history
-            messages = self.history.get(include_history=True)
-
-            # Import required utilities
-            from mcp_agent.workflows.llm.anthropic_utils import (
-                anthropic_message_param_to_prompt_message_multipart,
-            )
-            from mcp_agent.mcp.prompt_serialization import (
-                multipart_messages_to_delimited_format,
-            )
-
-            # Convert message params to PromptMessageMultipart objects
-            multipart_messages = []
-            for msg in messages:
-                multipart_messages.append(
-                    anthropic_message_param_to_prompt_message_multipart(msg)
-                )
-
-            # Convert to delimited format
-            delimited_content = multipart_messages_to_delimited_format(
-                multipart_messages,
-                user_delimiter="---USER",
-                assistant_delimiter="---ASSISTANT",
-            )
-
-            # Write to file
-            with open(filename, "w", encoding="utf-8") as f:
-                f.write("\n\n".join(delimited_content))
-
-            self.logger.info(f"Saved conversation history to {filename}")
-            return f"Done. Saved conversation history to {filename}"
-
-        except Exception as e:
-            self.logger.error(f"Error saving history: {str(e)}")
-            return f"Error saving history: {str(e)}"
+            self.logger.debug("Last message in prompt is from assistant, returning it directly")
+            return last_message
 
     async def generate_structured(
         self,
-        message,
+        message: str,
         response_model: Type[ModelT],
         request_params: RequestParams | None = None,
     ) -> ModelT:
@@ -475,9 +377,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         return response_model.model_validate(from_json(response, allow_partial=True))
 
     @classmethod
-    def convert_message_to_message_param(
-        cls, message: Message, **kwargs
-    ) -> MessageParam:
+    def convert_message_to_message_param(cls, message: Message, **kwargs) -> MessageParam:
         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
         content = []
 
@@ -495,41 +395,3 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         )
 
         return MessageParam(role="assistant", content=content, **kwargs)
-
-    def message_param_str(self, message: MessageParam) -> str:
-        """Convert an input message to a string representation."""
-
-        if message.get("content"):
-            content = message["content"]
-            if isinstance(content, str):
-                return content
-            else:
-                final_text: List[str] = []
-                for block in content:
-                    if block.text:
-                        final_text.append(str(block.text))
-                    else:
-                        final_text.append(str(block))
-
-                return "\n".join(final_text)
-
-        return str(message)
-
-    def message_str(self, message: Message) -> str:
-        """Convert an output message to a string representation."""
-        content = message.content
-
-        if content:
-            if isinstance(content, list):
-                final_text: List[str] = []
-                for block in content:
-                    if block.text:
-                        final_text.append(str(block.text))
-                    else:
-                        final_text.append(str(block))
-
-                return "\n".join(final_text)
-            else:
-                return str(content)
-
-        return str(message)
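Two details in the hunks above are easy to miss. First, generate is renamed generate_internal and now takes a single prepared message_param instead of a str/list/param union, with _apply_prompt_provider_specific returning a PromptMessageMultipart built via Prompt.assistant. Second, the Anthropic client is now constructed with a configurable base_url, trimming a trailing "/v1" first; the released code uses str.rstrip("/v1"), which strips a trailing run of the characters "/", "v", "1" rather than the literal suffix, though the endswith guard makes it behave as intended for ordinary ".../v1" endpoints. A standalone sketch of that normalisation (the function name is ours, not part of the package):

def normalize_base_url(base_url: str | None) -> str | None:
    # Mirrors the /v1 trimming in generate_internal above, but strips the
    # literal suffix via slicing rather than str.rstrip's character-set strip.
    if base_url and base_url.endswith("/v1"):
        base_url = base_url[: -len("/v1")]
    return base_url


assert normalize_base_url("https://api.example.com/v1") == "https://api.example.com"
assert normalize_base_url(None) is None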
--- /dev/null
+++ b/mcp_agent/llm/providers/augmented_llm_deepseek.py
@@ -0,0 +1,53 @@
+import os
+
+from mcp_agent.core.exceptions import ProviderKeyError
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+DEEPSEEK_BASE_URL = "https://api.deepseek.com"
+DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type models
+
+
+class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
+    def __init__(self, *args, **kwargs) -> None:
+        kwargs["provider_name"] = "Deepseek"  # Set provider name in kwargs
+        super().__init__(*args, **kwargs)  # Properly pass args and kwargs to parent
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Deepseek-specific default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_DEEPSEEK_MODEL)
+
+        return RequestParams(
+            model=chosen_model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    def _api_key(self) -> str:
+        config = self.context.config
+        api_key = None
+
+        if config and config.deepseek:
+            api_key = config.deepseek.api_key
+            if api_key == "<your-api-key-here>":
+                api_key = None
+
+        if api_key is None:
+            api_key = os.getenv("DEEPSEEK_API_KEY")
+
+        if not api_key:
+            raise ProviderKeyError(
+                "DEEPSEEK API key not configured",
+                "The DEEKSEEK API key is required but not set.\n"
+                "Add it to your configuration file under deepseek.api_key\n"
+                "Or set the DEEPSEEK_API_KEY environment variable",
+            )
+        return api_key
+
+    def _base_url(self) -> str:
+        if self.context.config and self.context.config.deepseek:
+            base_url = self.context.config.deepseek.base_url
+
+        return base_url if base_url else DEEPSEEK_BASE_URL
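The new provider resolves its API key in a fixed order: an explicit deepseek.api_key config value wins unless it is the "<your-api-key-here>" placeholder, then the DEEPSEEK_API_KEY environment variable, and ProviderKeyError if neither is set. A minimal standalone mirror of that precedence (the helper name is ours; the real _api_key raises instead of returning None):

import os

PLACEHOLDER = "<your-api-key-here>"


def resolve_deepseek_key(config_value: str | None) -> str | None:
    # Config value first, ignoring the documented placeholder, then the
    # environment; DeepSeekAugmentedLLM._api_key raises when both are absent.
    if config_value and config_value != PLACEHOLDER:
        return config_value
    return os.getenv("DEEPSEEK_API_KEY")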